-rw-r--r--  CREDITS | 4
-rw-r--r--  Documentation/admin-guide/cgroup-v2.rst | 20
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 4
-rw-r--r--  Documentation/arm64/memory.rst | 9
-rw-r--r--  Documentation/arm64/silicon-errata.rst | 2
-rw-r--r--  Documentation/core-api/index.rst | 1
-rw-r--r--  Documentation/core-api/memory-allocation.rst | 4
-rw-r--r--  Documentation/core-api/symbol-namespaces.rst (renamed from Documentation/kbuild/namespaces.rst) | 0
-rw-r--r--  Documentation/dev-tools/kasan.rst | 3
-rw-r--r--  Documentation/dev-tools/kselftest.rst | 16
-rw-r--r--  Documentation/devicetree/bindings/display/arm,malidp.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt | 30
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/display/renesas,du.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/serial/renesas,sci-serial.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/usb/amlogic,dwc3.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/usb/generic-ehci.yaml | 7
-rw-r--r--  Documentation/devicetree/bindings/usb/generic-ohci.yaml | 7
-rw-r--r--  Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/usb/mediatek,mtu3.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/usb/usb-hcd.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/usb/usb-uhci.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/usb/usb-xhci.txt | 4
-rw-r--r--  Documentation/driver-api/dma-buf.rst | 6
-rw-r--r--  Documentation/gpu/amdgpu.rst | 30
-rw-r--r--  Documentation/gpu/drm-kms-helpers.rst | 3
-rw-r--r--  Documentation/gpu/todo.rst | 127
-rw-r--r--  Documentation/hwmon/index.rst | 1
-rw-r--r--  Documentation/hwmon/inspur-ipsps1.rst | 2
-rw-r--r--  Documentation/hwmon/k10temp.rst | 18
-rw-r--r--  Documentation/networking/device_drivers/pensando/ionic.rst | 4
-rw-r--r--  Documentation/networking/net_dim.txt | 36
-rw-r--r--  Documentation/process/coding-style.rst | 2
-rw-r--r--  Documentation/process/deprecated.rst | 33
-rw-r--r--  Documentation/usb/rio.rst | 109
-rw-r--r--  MAINTAINERS | 25
-rw-r--r--  Makefile | 11
-rw-r--r--  arch/arm/boot/dts/mt7629-rfb.dts | 13
-rw-r--r--  arch/arm/boot/dts/mt7629.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sun4i-a10.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/sun5i.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sun6i-a31.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/sun7i-a20.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/sun8i-a23-a33.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sun8i-a83t.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/sun8i-r40.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/sun9i-a80.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/sunxi-h3-h5.dtsi | 6
-rw-r--r--  arch/arm/configs/badge4_defconfig | 1
-rw-r--r--  arch/arm/configs/corgi_defconfig | 1
-rw-r--r--  arch/arm/configs/pxa_defconfig | 1
-rw-r--r--  arch/arm/configs/s3c2410_defconfig | 1
-rw-r--r--  arch/arm/configs/spitz_defconfig | 1
-rw-r--r--  arch/arm/crypto/Kconfig | 1
-rw-r--r--  arch/arm/crypto/aes-ce-core.S | 1
-rw-r--r--  arch/arm64/Kconfig | 32
-rw-r--r--  arch/arm64/Makefile | 16
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts | 2
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi | 2
-rw-r--r--  arch/arm64/include/asm/asm-uaccess.h | 7
-rw-r--r--  arch/arm64/include/asm/atomic_lse.h | 6
-rw-r--r--  arch/arm64/include/asm/cpucaps.h | 4
-rw-r--r--  arch/arm64/include/asm/memory.h | 10
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 3
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 2
-rw-r--r--  arch/arm64/include/asm/vdso/compat_barrier.h | 2
-rw-r--r--  arch/arm64/include/asm/vdso_datapage.h | 33
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c | 5
-rw-r--r--  arch/arm64/kernel/cpu_errata.c | 42
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 16
-rw-r--r--  arch/arm64/kernel/entry.S | 9
-rw-r--r--  arch/arm64/kernel/ftrace.c | 12
-rw-r--r--  arch/arm64/kernel/hibernate.c | 9
-rw-r--r--  arch/arm64/kernel/process.c | 50
-rw-r--r--  arch/arm64/kernel/vdso32/Makefile | 44
-rw-r--r--  arch/arm64/kvm/hyp/switch.c | 69
-rw-r--r--  arch/arm64/mm/fault.c | 19
-rw-r--r--  arch/mips/configs/mtx1_defconfig | 1
-rw-r--r--  arch/mips/configs/rm200_defconfig | 1
-rw-r--r--  arch/mips/fw/sni/sniprom.c | 2
-rw-r--r--  arch/mips/include/asm/cmpxchg.h | 9
-rw-r--r--  arch/mips/include/uapi/asm/hwcap.h | 11
-rw-r--r--  arch/mips/kernel/cpu-probe.c | 33
-rw-r--r--  arch/mips/loongson64/Platform | 4
-rw-r--r--  arch/mips/vdso/Makefile | 1
-rw-r--r--  arch/parisc/include/asm/cache.h | 2
-rw-r--r--  arch/parisc/include/asm/ldcw.h | 2
-rw-r--r--  arch/parisc/mm/ioremap.c | 12
-rw-r--r--  arch/powerpc/include/asm/book3s/64/tlbflush-radix.h | 4
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 1
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 3
-rw-r--r--  arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts | 1
-rw-r--r--  arch/riscv/include/asm/pgtable.h | 16
-rw-r--r--  arch/riscv/include/asm/tlbflush.h | 4
-rw-r--r--  arch/riscv/kernel/entry.S | 3
-rw-r--r--  arch/riscv/kernel/traps.c | 18
-rw-r--r--  arch/s390/include/asm/uaccess.h | 4
-rw-r--r--  arch/sparc/Kconfig | 1
-rw-r--r--  arch/x86/boot/compressed/acpi.c | 48
-rw-r--r--  arch/x86/boot/compressed/misc.c | 25
-rw-r--r--  arch/x86/events/amd/core.c | 30
-rw-r--r--  arch/x86/events/intel/core.c | 4
-rw-r--r--  arch/x86/events/intel/cstate.c | 44
-rw-r--r--  arch/x86/events/msr.c | 7
-rw-r--r--  arch/x86/hyperv/hv_apic.c | 20
-rw-r--r--  arch/x86/include/asm/cpu_entry_area.h | 2
-rw-r--r--  arch/x86/include/asm/intel-family.h | 3
-rw-r--r--  arch/x86/include/asm/mwait.h | 2
-rw-r--r--  arch/x86/include/asm/pti.h | 2
-rw-r--r--  arch/x86/include/asm/uaccess.h | 23
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c | 3
-rw-r--r--  arch/x86/kernel/cpu/mshyperv.c | 4
-rw-r--r--  arch/x86/kernel/cpu/vmware.c | 2
-rw-r--r--  arch/x86/kernel/head64.c | 22
-rw-r--r--  arch/x86/kernel/process.h | 2
-rw-r--r--  arch/x86/lib/delay.c | 4
-rw-r--r--  arch/x86/platform/efi/efi.c | 3
-rw-r--r--  arch/x86/xen/enlighten.c | 28
-rw-r--r--  arch/xtensa/boot/dts/virt.dts | 2
-rw-r--r--  arch/xtensa/include/asm/bitops.h | 2
-rw-r--r--  arch/xtensa/include/asm/uaccess.h | 94
-rw-r--r--  arch/xtensa/kernel/xtensa_ksyms.c | 7
-rw-r--r--  block/blk-cgroup.c | 69
-rw-r--r--  block/blk-rq-qos.c | 14
-rw-r--r--  block/blk-rq-qos.h | 17
-rw-r--r--  block/blk-wbt.c | 6
-rw-r--r--  block/elevator.c | 3
-rw-r--r--  drivers/acpi/cppc_acpi.c | 2
-rw-r--r--  drivers/acpi/hmat/hmat.c | 2
-rw-r--r--  drivers/acpi/processor_perflib.c | 10
-rw-r--r--  drivers/acpi/processor_thermal.c | 10
-rw-r--r--  drivers/acpi/sleep.c | 13
-rw-r--r--  drivers/android/binder.c | 4
-rw-r--r--  drivers/android/binder_alloc.c | 2
-rw-r--r--  drivers/android/binder_internal.h | 2
-rw-r--r--  drivers/ata/ahci.c | 4
-rw-r--r--  drivers/ata/libata-scsi.c | 21
-rw-r--r--  drivers/base/core.c | 3
-rw-r--r--  drivers/base/memory.c | 3
-rw-r--r--  drivers/base/platform.c | 46
-rw-r--r--  drivers/block/nbd.c | 2
-rw-r--r--  drivers/block/null_blk_zoned.c | 3
-rw-r--r--  drivers/block/rbd.c | 9
-rw-r--r--  drivers/block/zram/zram_drv.c | 5
-rw-r--r--  drivers/cpufreq/cpufreq.c | 10
-rw-r--r--  drivers/dma-buf/dma-buf.c | 120
-rw-r--r--  drivers/firmware/dmi_scan.c | 2
-rw-r--r--  drivers/firmware/efi/cper.c | 2
-rw-r--r--  drivers/firmware/efi/efi.c | 3
-rw-r--r--  drivers/firmware/efi/rci2-table.c | 2
-rw-r--r--  drivers/firmware/efi/tpm.c | 26
-rw-r--r--  drivers/firmware/google/vpd_decode.c | 2
-rw-r--r--  drivers/gpio/gpio-eic-sprd.c | 7
-rw-r--r--  drivers/gpio/gpio-intel-mid.c | 9
-rw-r--r--  drivers/gpio/gpio-lynxpoint.c | 10
-rw-r--r--  drivers/gpio/gpio-max77620.c | 6
-rw-r--r--  drivers/gpio/gpio-merrifield.c | 9
-rw-r--r--  drivers/gpio/gpiolib-of.c | 2
-rw-r--r--  drivers/gpio/gpiolib.c | 49
-rw-r--r--  drivers/gpu/drm/Kconfig | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 98
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 69
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 143
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 286
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 214
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 214
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 173
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 83
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 274
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 87
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 248
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 216
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 197
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 104
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c | 70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c | 84
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 101
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 74
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 425
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 87
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 585
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 43
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 200
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 99
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 231
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 158
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 308
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 52
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/arct_reg_init.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/df_v1_7.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1355
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 94
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 417
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 380
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 214
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c | 70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 248
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v12_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 161
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 174
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_0.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_0.h | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 84
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 139
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 267
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 65
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_module.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 26
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig | 28
-rw-r--r--  drivers/gpu/drm/amd/display/Makefile | 7
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 233
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 31
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 346
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h | 66
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 46
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 109
-rw-r--r--  drivers/gpu/drm/amd/display/dc/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 156
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 160
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 273
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 160
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 101
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 44
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 41
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dsc.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 91
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 93
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.h | 187
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h | 50
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 60
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c | 62
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c | 57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h | 89
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 619
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 89
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c | 116
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c | 122
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c | 470
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h | 61
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 303
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_cp_psp.h | 49
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_helpers.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 85
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hdcp/Makefile | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c | 326
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/opp.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 19
-rw-r--r--  drivers/gpu/drm/amd/display/include/ddc_service_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/include/hdcp_types.h | 96
-rw-r--r--  drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 53
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/Makefile | 32
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c | 426
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 442
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c | 531
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c | 307
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c | 305
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c | 163
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h | 139
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 328
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h | 272
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h | 289
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c | 98
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 93
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h | 10
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h | 18
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h | 18
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h | 12
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h | 4
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h | 49
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h | 4
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h | 92
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h | 176
-rw-r--r--  drivers/gpu/drm/amd/include/atomfirmware.h | 27
-rw-r--r--  drivers/gpu/drm/amd/include/discovery.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h | 42
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 13
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h | 6
-rw-r--r--  drivers/gpu/drm/amd/include/renoir_ip_offset.h | 34
-rw-r--r--  drivers/gpu/drm/amd/include/vega10_enum.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 19
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 374
-rw-r--r--  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 50
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c | 195
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h | 29
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c | 19
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h | 13
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c | 196
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h | 29
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c | 222
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h | 29
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c | 91
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h | 32
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c | 231
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h | 29
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c | 23
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 26
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 66
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h | 21
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 114
-rw-r--r--  drivers/gpu/drm/amd/powerplay/navi10_ppt.h | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 463
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 128
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_v12_0.c | 101
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 59
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/d71/d71_component.c | 135
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h | 9
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_crtc.c | 29
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_kms.h | 2
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h | 3
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c | 46
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c | 5
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c | 16
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.c | 9
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.h | 3
-rw-r--r--  drivers/gpu/drm/arm/malidp_regs.h | 10
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 5
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 5
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 5
-rw-r--r--  drivers/gpu/drm/bochs/bochs_kms.c | 26
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/bridge/analogix-anx78xx.c | 92
-rw-r--r--  drivers/gpu/drm/bridge/analogix-anx78xx.h | 17
-rw-r--r--  drivers/gpu/drm/bridge/sii9234.c | 36
-rw-r--r--  drivers/gpu/drm/bridge/sil-sii8620.c | 10
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 83
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.h | 37
-rw-r--r--  drivers/gpu/drm/bridge/tc358767.c | 70
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus.c | 4
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 247
-rw-r--r--  drivers/gpu/drm/drm_blend.c | 8
-rw-r--r--  drivers/gpu/drm/drm_cache.c | 14
-rw-r--r--  drivers/gpu/drm/drm_dp_cec.c | 6
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 169
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 1184
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 117
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 61
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 30
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c | 31
-rw-r--r--  drivers/gpu/drm/drm_gem_ttm_helper.c | 17
-rw-r--r--  drivers/gpu/drm/drm_gem_vram_helper.c | 187
-rw-r--r--  drivers/gpu/drm/drm_mipi_dbi.c | 2
-rw-r--r--  drivers/gpu/drm/drm_mode_config.c | 2
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 9
-rw-r--r--  drivers/gpu/drm/drm_simple_kms_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 37
-rw-r--r--  drivers/gpu/drm/drm_vblank.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_display.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c | 14
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 5
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 28
-rw-r--r--  drivers/gpu/drm/lima/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/lima/Makefile | 4
-rw-r--r--  drivers/gpu/drm/lima/lima_device.c | 2
-rw-r--r--  drivers/gpu/drm/lima/lima_drv.c | 22
-rw-r--r--  drivers/gpu/drm/lima/lima_gem.c | 195
-rw-r--r--  drivers/gpu/drm/lima/lima_gem.h | 32
-rw-r--r--  drivers/gpu/drm/lima/lima_gem_prime.c | 46
-rw-r--r--  drivers/gpu/drm/lima/lima_gem_prime.h | 13
-rw-r--r--  drivers/gpu/drm/lima/lima_mmu.c | 1
-rw-r--r--  drivers/gpu/drm/lima/lima_object.c | 119
-rw-r--r--  drivers/gpu/drm/lima/lima_object.h | 35
-rw-r--r--  drivers/gpu/drm/lima/lima_sched.c | 6
-rw-r--r--  drivers/gpu/drm/lima/lima_vm.c | 87
-rw-r--r--  drivers/gpu/drm/mediatek/Makefile | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 61
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 37
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp.c | 128
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 67
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | 21
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_gem.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 233
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_mipi_tx.c | 338
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_mipi_tx.h | 49
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c | 288
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c | 149
-rw-r--r--  drivers/gpu/drm/meson/meson_dw_hdmi.c | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 5
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 6
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_ctrl.c | 70
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_crtc.c | 20
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_drv.c | 46
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_drv.h | 4
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_out.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 19
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dsi.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi4_core.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi5_core.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.h | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fb.c | 9
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 137
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-lg-lb035q02.c | 9
-rw-r--r--  drivers/gpu/drm/panel/panel-nec-nl8048hl11.c | 9
-rw-r--r--  drivers/gpu/drm/panel/panel-sony-acx565akm.c | 9
-rw-r--r--  drivers/gpu/drm/panel/panel-tpo-td028ttec1.c | 3
-rw-r--r--  drivers/gpu/drm/panel/panel-tpo-td043mtea1.c | 9
-rw-r--r--  drivers/gpu/drm/panfrost/TODO | 2
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_devfreq.c | 126
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_devfreq.h | 3
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_device.h | 14
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_drv.c | 2
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.c | 2
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gpu.c | 3
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_job.c | 33
-rw-r--r--  drivers/gpu/drm/pl111/pl111_display.c | 4
-rw-r--r--  drivers/gpu/drm/qxl/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 10
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c | 19
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 11
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 61
-rw-r--r--  drivers/gpu/drm/radeon/radeon_audio.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_dp_mst.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.c | 30
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.c | 6
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_lvds.c | 29
-rw-r--r--  drivers/gpu/drm/rockchip/cdn-dp-core.c | 12
-rw-r--r--  drivers/gpu/drm/rockchip/cdn-dp-core.h | 3
-rw-r--r--  drivers/gpu/drm/rockchip/cdn-dp-reg.c | 19
-rw-r--r--  drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 2
-rw-r--r--  drivers/gpu/drm/rockchip/rk3066_hdmi.c | 8
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 2
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 169
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 10
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop_reg.c | 48
-rw-r--r--  drivers/gpu/drm/scheduler/sched_fence.c | 4
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 43
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h | 1
-rw-r--r--  drivers/gpu/drm/tegra/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/tegra/Makefile | 1
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 30
-rw-r--r--  drivers/gpu/drm/tegra/dc.h | 2
-rw-r--r--  drivers/gpu/drm/tegra/dp.c | 876
-rw-r--r--  drivers/gpu/drm/tegra/dp.h | 177
-rw-r--r--  drivers/gpu/drm/tegra/dpaux.c | 208
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 417
-rw-r--r--  drivers/gpu/drm/tegra/drm.h | 13
-rw-r--r--  drivers/gpu/drm/tegra/falcon.c | 64
-rw-r--r--  drivers/gpu/drm/tegra/falcon.h | 16
-rw-r--r--  drivers/gpu/drm/tegra/fb.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/gem.c | 81
-rw-r--r--  drivers/gpu/drm/tegra/gem.h | 2
-rw-r--r--  drivers/gpu/drm/tegra/gr2d.c | 12
-rw-r--r--  drivers/gpu/drm/tegra/gr3d.c | 12
-rw-r--r--  drivers/gpu/drm/tegra/hub.c | 6
-rw-r--r--  drivers/gpu/drm/tegra/output.c | 28
-rw-r--r--  drivers/gpu/drm/tegra/plane.c | 104
-rw-r--r--  drivers/gpu/drm/tegra/plane.h | 8
-rw-r--r--  drivers/gpu/drm/tegra/sor.c | 1704
-rw-r--r--  drivers/gpu/drm/tegra/sor.h | 3
-rw-r--r--  drivers/gpu/drm/tegra/vic.c | 138
-rw-r--r--  drivers/gpu/drm/tiny/gm12u320.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 185
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 27
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 79
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 57
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 4
-rw-r--r--  drivers/gpu/drm/v3d/v3d_bo.c | 2
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.c | 2
-rw-r--r--  drivers/gpu/drm/vboxvideo/Makefile | 2
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_drv.c | 19
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_drv.h | 25
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_fb.c | 149
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_main.c | 119
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_mode.c | 138
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 5
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h | 4
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c | 9
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_object.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c | 12
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c | 12
-rw-r--r--  drivers/gpu/drm/vkms/vkms_drv.c | 13
-rw-r--r--  drivers/gpu/drm/vkms/vkms_drv.h | 6
-rw-r--r--  drivers/gpu/drm/vkms/vkms_gem.c | 27
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 17
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.h | 2
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front.c | 12
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_kms.c | 7
-rw-r--r--  drivers/gpu/host1x/Kconfig | 2
-rw-r--r--  drivers/gpu/host1x/bus.c | 2
-rw-r--r--  drivers/gpu/host1x/cdma.c | 6
-rw-r--r--  drivers/gpu/host1x/channel.c | 13
-rw-r--r--  drivers/gpu/host1x/channel.h | 1
-rw-r--r--  drivers/gpu/host1x/dev.c | 236
-rw-r--r--  drivers/gpu/host1x/dev.h | 3
-rw-r--r--  drivers/gpu/host1x/intr.c | 1
-rw-r--r--  drivers/gpu/host1x/job.c | 91
-rw-r--r--  drivers/gpu/host1x/job.h | 4
-rw-r--r--  drivers/hid/hid-hyperv.c | 56
-rw-r--r--  drivers/hv/vmbus_drv.c | 6
-rw-r--r--  drivers/hwmon/nct7904.c | 33
-rw-r--r--  drivers/iio/accel/adxl372.c | 22
-rw-r--r--  drivers/iio/accel/bmc150-accel-core.c | 2
-rw-r--r--  drivers/iio/adc/ad799x.c | 4
-rw-r--r--  drivers/iio/adc/axp288_adc.c | 32
-rw-r--r--  drivers/iio/adc/hx711.c | 10
-rw-r--r--  drivers/iio/adc/meson_saradc.c | 10
-rw-r--r--  drivers/iio/adc/stm32-adc-core.c | 70
-rw-r--r--  drivers/iio/adc/stm32-adc-core.h | 137
-rw-r--r--  drivers/iio/adc/stm32-adc.c | 109
-rw-r--r--  drivers/iio/imu/adis_buffer.c | 10
-rw-r--r--  drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h | 2
-rw-r--r--  drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c | 28
-rw-r--r--  drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c | 15
-rw-r--r--  drivers/iio/light/Kconfig | 1
-rw-r--r--  drivers/iio/light/opt3001.c | 6
-rw-r--r--  drivers/iio/light/vcnl4000.c | 14
-rw-r--r--  drivers/infiniband/core/cm.c | 3
-rw-r--r--  drivers/infiniband/core/cma.c | 3
-rw-r--r--  drivers/infiniband/core/device.c | 9
-rw-r--r--  drivers/infiniband/core/nldev.c | 12
-rw-r--r--  drivers/infiniband/core/security.c | 2
-rw-r--r--  drivers/infiniband/core/umem_odp.c | 6
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c | 7
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c | 28
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 10
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.c | 5
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 58
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 3
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 68
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 58
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c | 2
-rw-r--r--  drivers/infiniband/sw/siw/siw_qp.c | 15
-rw-r--r--  drivers/input/misc/da9063_onkey.c | 5
-rw-r--r--  drivers/input/misc/soc_button_array.c | 17
-rw-r--r--  drivers/input/mouse/elantech.c | 55
-rw-r--r--  drivers/input/rmi4/rmi_driver.c | 6
-rw-r--r--  drivers/input/touchscreen/goodix.c | 58
-rw-r--r--  drivers/iommu/amd_iommu.c | 12
-rw-r--r--  drivers/iommu/amd_iommu_types.h | 4
-rw-r--r--  drivers/iommu/arm-smmu.c | 1
-rw-r--r--  drivers/iommu/io-pgtable-arm.c | 58
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c | 3
-rw-r--r--  drivers/iommu/rockchip-iommu.c | 19
-rw-r--r--  drivers/irqchip/irq-al-fic.c | 12
-rw-r--r--  drivers/irqchip/irq-atmel-aic5.c | 10
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 2
-rw-r--r--  drivers/irqchip/irq-sifive-plic.c | 29
-rw-r--r--  drivers/md/dm-cache-target.c | 28
-rw-r--r--  drivers/md/dm-clone-target.c | 4
-rw-r--r--  drivers/md/dm-snap.c | 94
-rw-r--r--  drivers/md/raid0.c | 2
-rw-r--r--  drivers/media/usb/stkwebcam/stk-webcam.c | 3
-rw-r--r--  drivers/memstick/host/jmb38x_ms.c | 2
-rw-r--r--  drivers/misc/fastrpc.c | 1
-rw-r--r--  drivers/misc/mei/bus-fixup.c | 14
-rw-r--r--  drivers/misc/mei/hw-me-regs.h | 3
-rw-r--r--  drivers/misc/mei/hw-me.c | 21
-rw-r--r--  drivers/misc/mei/hw-me.h | 8
-rw-r--r--  drivers/misc/mei/mei_dev.h | 4
-rw-r--r--  drivers/misc/mei/pci-me.c | 13
-rw-r--r--  drivers/mmc/host/renesas_sdhi_core.c | 31
-rw-r--r--  drivers/mmc/host/sdhci-iproc.c | 1
-rw-r--r--  drivers/mmc/host/sh_mmcif.c | 6
-rw-r--r--  drivers/mtd/nand/raw/au1550nd.c | 5
-rw-r--r--  drivers/mtd/spi-nor/spi-nor.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 1
-rw-r--r--  drivers/net/dsa/microchip/ksz8795.c | 4
-rw-r--r--  drivers/net/dsa/microchip/ksz8795_spi.c | 7
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_i2c.c | 6
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_reg.h | 4
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_spi.c | 6
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 2
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.h | 20
-rw-r--r--  drivers/net/dsa/sja1105/sja1105.h | 4
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_dynamic_config.h | 4
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.h | 4
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_static_config.h | 4
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_tas.h | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 34
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 23
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c | 17
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h | 7
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 19
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 41
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 117
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 12
-rw-r--r--  drivers/net/ethernet/cavium/common/cavium_ptp.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 50
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.h | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 21
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 11
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 28
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 1
-rw-r--r--  drivers/net/ethernet/i825xx/lasi_82596.c | 4
-rw-r--r--  drivers/net/ethernet/i825xx/lib82596.c | 4
-rw-r--r--  drivers/net/ethernet/i825xx/sni_82596.c | 4
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 8
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mr.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 1
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 3
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.h | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_stats.c | 29
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 35
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c | 11
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 2
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c | 2
-rw-r--r--  drivers/net/netdevsim/fib.c | 3
-rw-r--r--  drivers/net/phy/bcm7xxx.c | 1
-rw-r--r--  drivers/net/phy/micrel.c | 42
-rw-r--r--  drivers/net/phy/phy-c45.c | 2
-rw-r--r--  drivers/net/phy/phy.c | 3
-rw-r--r--  drivers/net/phy/phy_device.c | 11
-rw-r--r--  drivers/net/phy/phylink.c | 2
-rw-r--r--  drivers/net/tun.c | 4
-rw-r--r--  drivers/net/usb/hso.c | 13
-rw-r--r--  drivers/net/usb/lan78xx.c | 12
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/usb/sr9800.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 15
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/acpi.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-io.h | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 43
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c | 36
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 274
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 25
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 2
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00.h | 1
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00debug.c | 2
-rw-r--r--  drivers/net/xen-netback/interface.c | 1
-rw-r--r--  drivers/nfc/pn533/usb.c | 9
-rw-r--r--  drivers/nvme/host/core.c | 94
-rw-r--r--  drivers/nvme/host/fabrics.h | 3
-rw-r--r--  drivers/nvme/host/nvme.h | 5
-rw-r--r--  drivers/nvme/host/pci.c | 83
-rw-r--r--  drivers/nvme/host/rdma.c | 8
-rw-r--r--  drivers/nvme/host/tcp.c | 11
-rw-r--r--  drivers/nvme/target/loop.c | 4
-rw-r--r--  drivers/parisc/sba_iommu.c | 8
-rw-r--r--  drivers/pci/pci.c | 24
-rw-r--r--  drivers/platform/x86/classmate-laptop.c | 12
-rw-r--r--  drivers/platform/x86/i2c-multi-instantiate.c | 1
-rw-r--r--  drivers/platform/x86/intel_punit_ipc.c | 3
-rw-r--r--  drivers/ptp/Kconfig | 4
-rw-r--r--  drivers/s390/cio/cio.h | 1
-rw-r--r--  drivers/s390/cio/css.c | 7
-rw-r--r--  drivers/s390/cio/device.c | 2
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 23
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 16
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 2
-rw-r--r--  drivers/scsi/scsi_error.c | 3
-rw-r--r--  drivers/scsi/scsi_lib.c | 4
-rw-r--r--  drivers/scsi/sd.c | 3
-rw-r--r--  drivers/staging/exfat/Kconfig | 3
-rw-r--r--  drivers/staging/exfat/Makefile | 2
-rw-r--r--  drivers/staging/exfat/exfat.h | 2
-rw-r--r--  drivers/staging/exfat/exfat_blkdev.c | 2
-rw-r--r--  drivers/staging/exfat/exfat_cache.c | 2
-rw-r--r--  drivers/staging/exfat/exfat_core.c | 2
-rw-r--r--  drivers/staging/exfat/exfat_nls.c | 2
-rw-r--r--  drivers/staging/exfat/exfat_super.c | 7
-rw-r--r--  drivers/staging/exfat/exfat_upcase.c | 2
-rw-r--r--  drivers/staging/fbtft/Kconfig | 12
-rw-r--r--  drivers/staging/fbtft/Makefile | 4
-rw-r--r--  drivers/staging/fbtft/fbtft-core.c | 7
-rw-r--r--  drivers/staging/fbtft/fbtft_device.c | 1261
-rw-r--r--  drivers/staging/fbtft/flexfb.c | 851
-rw-r--r--  drivers/staging/octeon/ethernet-tx.c | 9
-rw-r--r--  drivers/staging/octeon/octeon-stubs.h | 2
-rw-r--r--  drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c | 2
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/usb_intf.c | 6
-rw-r--r--  drivers/staging/speakup/sysfs-driver-speakup | 369
-rw-r--r--  drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c | 4
-rw-r--r--  drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c | 1
-rw-r--r--  drivers/staging/vt6655/device_main.c | 4
-rw-r--r--  drivers/tty/n_hdlc.c | 5
-rw-r--r--  drivers/tty/serial/8250/8250_omap.c | 5
-rw-r--r--  drivers/tty/serial/Kconfig | 1
-rw-r--r--  drivers/tty/serial/fsl_linflexuart.c | 21
-rw-r--r--  drivers/tty/serial/fsl_lpuart.c | 2
-rw-r--r--  drivers/tty/serial/imx.c | 4
-rw-r--r--  drivers/tty/serial/owl-uart.c | 2
-rw-r--r--  drivers/tty/serial/rda-uart.c | 2
-rw-r--r--  drivers/tty/serial/serial_core.c | 2
-rw-r--r--  drivers/tty/serial/serial_mctrl_gpio.c | 3
-rw-r--r--  drivers/tty/serial/sh-sci.c | 8
-rw-r--r--  drivers/tty/serial/uartlite.c | 3
-rw-r--r--  drivers/tty/serial/xilinx_uartps.c | 8
-rw-r--r--  drivers/usb/cdns3/cdns3-pci-wrap.c | 3
-rw-r--r--  drivers/usb/cdns3/core.c | 20
-rw-r--r--  drivers/usb/cdns3/ep0.c | 12
-rw-r--r--  drivers/usb/cdns3/gadget.c | 8
-rw-r--r--  drivers/usb/class/usblp.c | 8
-rw-r--r--  drivers/usb/dwc3/drd.c | 7
-rw-r--r--  drivers/usb/dwc3/gadget.c | 7
-rw-r--r--  drivers/usb/dwc3/host.c | 7
-rw-r--r--  drivers/usb/gadget/udc/Kconfig | 2
-rw-r--r--  drivers/usb/gadget/udc/dummy_hcd.c | 3
-rw-r--r--  drivers/usb/gadget/udc/lpc32xx_udc.c | 4
-rw-r--r--  drivers/usb/host/xhci-ext-caps.c | 1
-rw-r--r--  drivers/usb/host/xhci-ring.c | 4
-rw-r--r--  drivers/usb/host/xhci.c | 78
-rw-r--r--  drivers/usb/image/microtek.c | 4
-rw-r--r--  drivers/usb/misc/Kconfig | 10
-rw-r--r--  drivers/usb/misc/Makefile | 1
-rw-r--r--  drivers/usb/misc/adutux.c | 24
-rw-r--r--  drivers/usb/misc/chaoskey.c | 5
-rw-r--r--  drivers/usb/misc/iowarrior.c | 48
-rw-r--r--  drivers/usb/misc/ldusb.c | 24
-rw-r--r--  drivers/usb/misc/legousbtower.c | 58
-rw-r--r--  drivers/usb/misc/rio500.c | 554
-rw-r--r--  drivers/usb/misc/rio500_usb.h | 20
-rw-r--r--  drivers/usb/misc/usblcd.c | 60
-rw-r--r--  drivers/usb/misc/yurex.c | 18
-rw-r--r--  drivers/usb/renesas_usbhs/common.h | 1
-rw-r--r--  drivers/usb/renesas_usbhs/fifo.c | 2
-rw-r--r--  drivers/usb/renesas_usbhs/fifo.h | 1
-rw-r--r--  drivers/usb/renesas_usbhs/mod_gadget.c | 18
-rw-r--r--  drivers/usb/renesas_usbhs/pipe.c | 15
-rw-r--r--  drivers/usb/renesas_usbhs/pipe.h | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 3
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 9
-rw-r--r--  drivers/usb/serial/keyspan.c | 4
-rw-r--r--  drivers/usb/serial/option.c | 11
-rw-r--r--  drivers/usb/serial/usb-serial.c | 5
-rw-r--r--  drivers/usb/typec/tcpm/tcpm.c | 14
-rw-r--r--  drivers/usb/typec/ucsi/displayport.c | 2
-rw-r--r--  drivers/usb/typec/ucsi/ucsi_ccg.c | 42
-rw-r--r--  drivers/usb/usb-skeleton.c | 19
-rw-r--r--  drivers/usb/usbip/vhci_hcd.c | 4
-rw-r--r--  drivers/vhost/test.c | 2
-rw-r--r--  drivers/virt/vboxguest/vboxguest_utils.c | 3
-rw-r--r--  drivers/w1/slaves/Kconfig | 1
-rw-r--r--  drivers/xen/gntdev.c | 13
-rw-r--r--  drivers/xen/grant-table.c | 3
-rw-r--r--  drivers/xen/pvcalls-back.c | 2
-rw-r--r--  fs/btrfs/file.c | 13
-rw-r--r--  fs/btrfs/inode.c | 3
-rw-r--r--  fs/btrfs/ref-verify.c | 2
-rw-r--r--  fs/btrfs/send.c | 2
-rw-r--r--  fs/btrfs/tree-log.c | 36
-rw-r--r--  fs/btrfs/volumes.c | 6
-rw-r--r--  fs/ceph/mds_client.c | 21
-rw-r--r--  fs/cifs/cifsfs.c | 24
-rw-r--r--  fs/cifs/cifsglob.h | 2
-rw-r--r--  fs/cifs/connect.c | 4
-rw-r--r--  fs/cifs/dir.c | 8
-rw-r--r--  fs/cifs/file.c | 33
-rw-r--r--  fs/cifs/inode.c | 4
-rw-r--r--  fs/cifs/netmisc.c | 4
-rw-r--r--  fs/cifs/smb2pdu.c | 14
-rw-r--r--  fs/cifs/smb2proto.h | 4
-rw-r--r--  fs/direct-io.c | 3
-rw-r--r--  fs/fs-writeback.c | 11
-rw-r--r--  fs/io_uring.c | 145
-rw-r--r--  fs/libfs.c | 140
-rw-r--r--  fs/nfs/direct.c | 106
-rw-r--r--  fs/nfs/nfs4proc.c | 1
-rw-r--r--  fs/nfs/write.c | 5
-rw-r--r--  fs/ocfs2/aops.c | 25
-rw-r--r--  fs/ocfs2/file.c | 2
-rw-r--r--  fs/ocfs2/ioctl.c | 2
-rw-r--r--  fs/ocfs2/journal.c | 3
-rw-r--r--  fs/ocfs2/localalloc.c | 3
-rw-r--r--  fs/ocfs2/xattr.c | 56
-rw-r--r--  fs/proc/meminfo.c | 4
-rw-r--r--  fs/proc/page.c | 28
-rw-r--r--  fs/readdir.c | 48
-rw-r--r--  fs/super.c | 5
-rw-r--r--  fs/tracefs/inode.c | 46
-rw-r--r--  fs/xfs/libxfs/xfs_ag.c | 5
-rw-r--r--  fs/xfs/libxfs/xfs_attr_leaf.c | 21
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 6
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.h | 3
-rw-r--r--  fs/xfs/libxfs/xfs_dir2_block.c | 2
-rw-r--r--  fs/xfs/libxfs/xfs_fs.h | 8
-rw-r--r--  fs/xfs/scrub/refcount.c | 3
-rw-r--r--  fs/xfs/xfs_bmap_util.c | 4
-rw-r--r--  fs/xfs/xfs_buf.c | 12
-rw-r--r--  fs/xfs/xfs_log.c | 2
-rw-r--r--  fs/xfs/xfs_log_recover.c | 2
-rw-r--r--  include/drm/amd_asic_type.h | 56
-rw-r--r--  include/drm/bridge/dw_hdmi.h | 1
-rw-r--r--  include/drm/drmP.h | 103
-rw-r--r--  include/drm/drm_dp_helper.h | 81
-rw-r--r--  include/drm/drm_dp_mst_helper.h | 160
-rw-r--r--  include/drm/drm_edid.h | 5
-rw-r--r--  include/drm/drm_fb_helper.h | 1
-rw-r--r--  include/drm/drm_gem.h | 15
-rw-r--r--  include/drm/drm_gem_shmem_helper.h | 30
-rw-r--r--  include/drm/drm_gem_ttm_helper.h | 2
-rw-r--r--  include/drm/drm_gem_vram_helper.h | 50
-rw-r--r--  include/drm/drm_os_linux.h | 55
-rw-r--r--  include/drm/drm_plane.h | 31
-rw-r--r--  include/drm/drm_simple_kms_helper.h | 2
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 66
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 26
-rw-r--r--  include/drm/ttm/ttm_execbuf_util.h | 2
-rw-r--r--  include/drm/ttm/ttm_memory.h | 1
-rw-r--r--  include/linux/bitmap.h | 3
-rw-r--r--  include/linux/compiler_attributes.h | 17
-rw-r--r--  include/linux/device_cgroup.h | 19
-rw-r--r--  include/linux/dma-buf.h | 63
-rw-r--r--  include/linux/export.h | 10
-rw-r--r--  include/linux/gpio/driver.h | 8
-rw-r--r--  include/linux/host1x.h | 26
-rw-r--r--  include/linux/hwmon.h | 2
-rw-r--r--  include/linux/leds.h | 5
-rw-r--r--  include/linux/memcontrol.h | 29
-rw-r--r--  include/linux/micrel_phy.h | 2
-rw-r--r--  include/linux/page_ext.h | 10
-rw-r--r--  include/linux/platform_device.h | 2
-rw-r--r--  include/linux/sched.h | 1
-rw-r--r--  include/linux/skbuff.h | 5
-rw-r--r--  include/linux/slab.h | 4
-rw-r--r--  include/linux/string.h | 21
-rw-r--r--  include/linux/sunrpc/xprtsock.h | 1
-rw-r--r--  include/linux/tcp.h | 6
-rw-r--r--  include/linux/tpm_eventlog.h | 16
-rw-r--r--  include/linux/uaccess.h | 6
-rw-r--r--  include/linux/xarray.h | 4
-rw-r--r--  include/net/cfg80211.h | 8
-rw-r--r--  include/net/llc_conn.h | 2
-rw-r--r--  include/net/net_namespace.h | 25
-rw-r--r--  include/net/request_sock.h | 4
-rw-r--r--  include/net/sctp/sctp.h | 5
-rw-r--r--  include/net/sock.h | 33
-rw-r--r--  include/net/tcp.h | 10
-rw-r--r--  include/scsi/scsi_eh.h | 1
-rw-r--r--  include/sound/hda_register.h | 3
-rw-r--r--  include/trace/events/rxrpc.h | 18
-rw-r--r--  include/trace/events/sock.h | 4
-rw-r--r--  include/uapi/drm/amdgpu_drm.h | 2
-rw-r--r--  include/uapi/drm/drm.h | 3
-rw-r--r--  include/uapi/drm/drm_fourcc.h | 2
-rw-r--r--  include/uapi/drm/exynos_drm.h | 2
-rw-r--r--  include/uapi/drm/omap_drm.h | 18
-rw-r--r--  include/uapi/linux/serial_core.h | 2
-rw-r--r--  kernel/events/core.c | 43
-rw-r--r--  kernel/events/uprobes.c | 13
-rw-r--r--  kernel/fork.c | 4
-rw-r--r--  kernel/freezer.c | 6
-rwxr-xr-x  kernel/gen_kheaders.sh | 11
-rw-r--r--  kernel/kthread.c | 6
-rw-r--r--  kernel/panic.c | 1
-rw-r--r--  kernel/power/main.c | 1
-rw-r--r--  kernel/sched/cputime.c | 6
-rw-r--r--  kernel/sched/fair.c | 36
-rw-r--r--  kernel/stop_machine.c | 10
-rw-r--r--  kernel/sysctl.c | 4
-rw-r--r--  kernel/time/hrtimer.c | 8
-rw-r--r--  kernel/trace/ftrace.c | 55
-rw-r--r--  kernel/trace/trace.c | 139
-rw-r--r--  kernel/trace/trace.h | 2
-rw-r--r--  kernel/trace/trace_dynevent.c | 4
-rw-r--r--  kernel/trace/trace_events.c | 35
-rw-r--r--  kernel/trace/trace_events_hist.c | 13
-rw-r--r--  kernel/trace/trace_events_trigger.c | 8
-rw-r--r--  kernel/trace/trace_hwlat.c | 4
-rw-r--r--  kernel/trace/trace_kprobe.c | 12
-rw-r--r--  kernel/trace/trace_printk.c | 7
-rw-r--r--  kernel/trace/trace_stack.c | 8
-rw-r--r--  kernel/trace/trace_stat.c | 6
-rw-r--r--  kernel/trace/trace_uprobe.c | 11
-rw-r--r--  lib/generic-radix-tree.c | 32
-rw-r--r--  lib/string.c | 21
-rw-r--r--  lib/test_meminit.c | 27
-rw-r--r--  lib/test_user_copy.c | 37
-rw-r--r--  lib/vdso/Kconfig | 9
-rw-r--r--  mm/backing-dev.c | 4
-rw-r--r--  mm/compaction.c | 7
-rw-r--r--  mm/filemap.c | 1
-rw-r--r--  mm/gup.c | 14
-rw-r--r--  mm/huge_memory.c | 9
-rw-r--r--  mm/hugetlb.c | 5
-rw-r--r--  mm/init-mm.c | 1
-rw-r--r--  mm/kmemleak.c | 30
-rw-r--r--  mm/memblock.c | 6
-rw-r--r--  mm/memcontrol.c | 23
-rw-r--r--  mm/memory-failure.c | 36
-rw-r--r--  mm/memory_hotplug.c | 72
-rw-r--r--  mm/memremap.c | 13
-rw-r--r--  mm/page_alloc.c | 14
-rw-r--r--  mm/page_ext.c | 23
-rw-r--r--  mm/page_owner.c | 60
-rw-r--r--  mm/rmap.c | 1
-rw-r--r--  mm/shmem.c | 6
-rw-r--r--  mm/shuffle.c | 2
-rw-r--r--  mm/slab.c | 3
-rw-r--r--  mm/slab_common.c | 28
-rw-r--r--  mm/slob.c | 62
-rw-r--r--  mm/slub.c | 49
-rw-r--r--  mm/sparse.c | 2
-rw-r--r--  mm/truncate.c | 12
-rw-r--r--  mm/vmpressure.c | 20
-rw-r--r--  mm/vmscan.c | 86
-rw-r--r--  mm/z3fold.c | 10
-rw-r--r--  net/bridge/netfilter/nf_conntrack_bridge.c | 3
-rw-r--r--  net/core/datagram.c | 2
-rw-r--r--  net/core/filter.c | 8
-rw-r--r--  net/core/net_namespace.c | 17
-rw-r--r--  net/core/request_sock.c | 2
-rw-r--r--  net/core/skbuff.c | 23
-rw-r--r--  net/core/sock.c | 32
-rw-r--r--  net/dsa/dsa2.c | 2
-rw-r--r--  net/ipv4/inet_connection_sock.c | 6
-rw-r--r--  net/ipv4/inet_diag.c | 2
-rw-r--r--  net/ipv4/ip_output.c | 3
-rw-r--r--  net/ipv4/route.c | 11
-rw-r--r--  net/ipv4/tcp.c | 75
-rw-r--r--  net/ipv4/tcp_diag.c | 5
-rw-r--r--  net/ipv4/tcp_fastopen.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 37
-rw-r--r--  net/ipv4/tcp_ipv4.c | 30
-rw-r--r--  net/ipv4/tcp_minisocks.c | 17
-rw-r--r--  net/ipv4/tcp_output.c | 32
-rw-r--r--  net/ipv4/tcp_timer.c | 11
-rw-r--r--  net/ipv6/ip6_gre.c | 1
-rw-r--r--  net/ipv6/ip6_output.c | 3
-rw-r--r--  net/ipv6/netfilter.c | 3
-rw-r--r--  net/ipv6/tcp_ipv6.c | 18
-rw-r--r--  net/llc/af_llc.c | 34
-rw-r--r--  net/llc/llc_c_ac.c | 8
-rw-r--r--  net/llc/llc_conn.c | 69
-rw-r--r--  net/llc/llc_if.c | 12
-rw-r--r--  net/llc/llc_s_ac.c | 12
-rw-r--r--  net/llc/llc_sap.c | 23
-rw-r--r--  net/mac80211/mlme.c | 5
-rw-r--r--  net/mac80211/rx.c | 11
-rw-r--r--  net/mac80211/scan.c | 30
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 4
-rw-r--r--  net/openvswitch/actions.c | 5
-rw-r--r--  net/rxrpc/ar-internal.h | 1
-rw-r--r--  net/rxrpc/call_accept.c | 5
-rw-r--r--  net/rxrpc/call_object.c | 34
-rw-r--r--  net/rxrpc/conn_client.c | 9
-rw-r--r--  net/rxrpc/conn_object.c | 13
-rw-r--r--  net/rxrpc/conn_service.c | 2
-rw-r--r--  net/rxrpc/peer_event.c | 11
-rw-r--r--  net/rxrpc/peer_object.c | 16
-rw-r--r--  net/rxrpc/recvmsg.c | 6
-rw-r--r--  net/rxrpc/sendmsg.c | 3
-rw-r--r--  net/sched/act_api.c | 23
-rw-r--r--  net/sched/act_mirred.c | 6
-rw-r--r--  net/sched/act_mpls.c | 12
-rw-r--r--  net/sched/cls_api.c | 36
-rw-r--r--  net/sched/em_meta.c | 4
-rw-r--r--  net/sched/sch_api.c | 3
-rw-r--r--  net/sched/sch_etf.c | 2
-rw-r--r--  net/sched/sch_netem.c | 11
-rw-r--r--  net/sched/sch_taprio.c | 4
-rw-r--r--  net/sctp/diag.c | 2
-rw-r--r--  net/sctp/input.c | 16
-rw-r--r--  net/sctp/sm_make_chunk.c | 12
-rw-r--r--  net/sctp/socket.c | 4
-rw-r--r--  net/smc/smc_core.c | 5
-rw-r--r--  net/smc/smc_rx.c | 29
-rw-r--r--  net/sunrpc/xprtsock.c | 17
-rw-r--r--  net/tipc/socket.c | 8
-rw-r--r--  net/vmw_vsock/virtio_transport_common.c | 17
-rw-r--r--  net/wireless/nl80211.c | 2
-rw-r--r--  net/wireless/reg.c | 1
-rw-r--r--  net/wireless/reg.h | 8
-rw-r--r--  net/wireless/scan.c | 23
-rw-r--r--  net/wireless/wext-sme.c | 8
-rw-r--r--  net/x25/x25_dev.c | 2
-rw-r--r--  net/xdp/xsk.c | 42
-rw-r--r--  samples/bpf/asm_goto_workaround.h | 13
-rw-r--r--  samples/bpf/task_fd_query_user.c | 1
-rw-r--r--  scripts/coccinelle/api/devm_platform_ioremap_resource.cocci | 60
-rw-r--r--  scripts/coccinelle/misc/add_namespace.cocci | 2
-rw-r--r--  scripts/gdb/linux/dmesg.py | 16
-rw-r--r--  scripts/gdb/linux/symbols.py | 8
-rw-r--r--  scripts/gdb/linux/utils.py | 25
-rw-r--r--  scripts/mod/modpost.c | 29
-rw-r--r--  scripts/nsdeps | 4
-rw-r--r--  scripts/recordmcount.h | 5
-rwxr-xr-x  scripts/setlocalversion | 2
-rw-r--r--  security/device_cgroup.c | 15
-rw-r--r--  security/selinux/ss/services.c | 9
-rw-r--r--  sound/hda/ext/hdac_ext_controller.c | 5
-rw-r--r--  sound/pci/hda/patch_hdmi.c | 2
-rw-r--r--  sound/pci/hda/patch_realtek.c | 35
-rw-r--r--  sound/usb/pcm.c | 3
-rw-r--r--  tools/arch/arm/include/uapi/asm/kvm.h | 4
-rw-r--r--  tools/arch/arm64/include/uapi/asm/kvm.h | 4
-rw-r--r--  tools/arch/s390/include/uapi/asm/kvm.h | 6
-rw-r--r--  tools/arch/x86/include/uapi/asm/vmx.h | 2
-rw-r--r--  tools/bpf/Makefile | 6
-rw-r--r--  tools/include/uapi/asm-generic/mman-common.h | 3
-rw-r--r--  tools/include/uapi/drm/i915_drm.h | 1
-rw-r--r--  tools/include/uapi/linux/fs.h | 55
-rw-r--r--  tools/include/uapi/linux/fscrypt.h | 181
-rw-r--r--  tools/include/uapi/linux/kvm.h | 3
-rw-r--r--  tools/include/uapi/linux/usbdevice_fs.h | 4
-rw-r--r--  tools/lib/bpf/Makefile | 33
-rw-r--r--  tools/lib/bpf/libbpf_internal.h | 16
-rw-r--r--  tools/lib/bpf/xsk.c | 4
-rw-r--r--  tools/lib/subcmd/Makefile | 8
-rw-r--r--  tools/perf/Documentation/asciidoc.conf | 3
-rw-r--r--  tools/perf/Documentation/jitdump-specification.txt | 4
-rw-r--r--  tools/perf/arch/arm/annotate/instructions.c | 4
-rw-r--r--  tools/perf/arch/arm64/annotate/instructions.c | 4
-rw-r--r--  tools/perf/arch/powerpc/util/header.c | 3
-rw-r--r--  tools/perf/arch/s390/annotate/instructions.c | 6
-rw-r--r--  tools/perf/arch/s390/util/header.c | 9
-rw-r--r--  tools/perf/arch/x86/annotate/instructions.c | 6
-rw-r--r--  tools/perf/arch/x86/util/header.c | 3
-rw-r--r--  tools/perf/builtin-kvm.c | 7
-rw-r--r--  tools/perf/builtin-script.c | 6
-rwxr-xr-x  tools/perf/check-headers.sh | 1
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z15/basic.json (renamed from tools/perf/pmu-events/arch/s390/cf_m8561/basic.json) | 0
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z15/crypto.json (renamed from tools/perf/pmu-events/arch/s390/cf_m8561/crypto.json) | 0
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json (renamed from tools/perf/pmu-events/arch/s390/cf_m8561/crypto6.json) | 0
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z15/extended.json (renamed from tools/perf/pmu-events/arch/s390/cf_m8561/extended.json) | 0
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z15/transaction.json | 7
-rw-r--r--  tools/perf/pmu-events/arch/s390/mapfile.csv | 2
-rw-r--r--  tools/perf/pmu-events/jevents.c | 12
-rw-r--r--tools/perf/tests/perf-hooks.c3
-rw-r--r--tools/perf/util/annotate.c35
-rw-r--r--tools/perf/util/annotate.h4
-rw-r--r--tools/perf/util/evsel.c3
-rw-r--r--tools/perf/util/jitdump.c6
-rw-r--r--tools/perf/util/llvm-utils.c6
-rw-r--r--tools/perf/util/map.c3
-rw-r--r--tools/perf/util/python.c6
-rw-r--r--tools/testing/selftests/Makefile19
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_rtt.c3
-rwxr-xr-xtools/testing/selftests/bpf/test_flow_dissector.sh3
-rwxr-xr-xtools/testing/selftests/bpf/test_lwt_ip_encap.sh6
-rw-r--r--tools/testing/selftests/kselftest/runner.sh36
-rwxr-xr-xtools/testing/selftests/kselftest_install.sh4
-rw-r--r--tools/testing/selftests/powerpc/mm/tlbie_test.c2
-rw-r--r--tools/testing/selftests/rtc/settings1
-rw-r--r--tools/testing/selftests/vm/gup_benchmark.c2
-rw-r--r--tools/testing/selftests/watchdog/watchdog-test.c27
-rw-r--r--tools/virtio/crypto/hash.h (renamed from arch/arm64/kernel/vdso/gettimeofday.S)0
-rw-r--r--tools/virtio/linux/dma-mapping.h2
-rw-r--r--tools/virtio/xen/xen.h6
1287 files changed, 31149 insertions, 16390 deletions
diff --git a/CREDITS b/CREDITS
index 8b67a85844b5..031605d46b4d 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1637,6 +1637,10 @@ S: Panoramastrasse 18
S: D-69126 Heidelberg
S: Germany
+N: Simon Horman
+M: horms@verge.net.au
+D: Renesas ARM/ARM64 SoC maintainer
+
N: Christopher Horn
E: chorn@warwick.net
D: Miscellaneous sysctl hacks
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 0fa8c0e615c2..5361ebec3361 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -615,8 +615,8 @@ on an IO device and is an example of this type.
Protections
-----------
-A cgroup is protected to be allocated upto the configured amount of
-the resource if the usages of all its ancestors are under their
+A cgroup is protected up to the configured amount of the resource
+as long as the usages of all its ancestors are under their
protected levels. Protections can be hard guarantees or best effort
soft boundaries. Protections can also be over-committed in which case
only upto the amount available to the parent is protected among
@@ -1096,7 +1096,10 @@ PAGE_SIZE multiple when read back.
is within its effective min boundary, the cgroup's memory
won't be reclaimed under any conditions. If there is no
unprotected reclaimable memory available, OOM killer
- is invoked.
+ is invoked. Above the effective min boundary (or
+ effective low boundary if it is higher), pages are reclaimed
+ proportionally to the overage, reducing reclaim pressure for
+ smaller overages.
Effective min boundary is limited by memory.min values of
all ancestor cgroups. If there is memory.min overcommitment
@@ -1118,7 +1121,10 @@ PAGE_SIZE multiple when read back.
Best-effort memory protection. If the memory usage of a
cgroup is within its effective low boundary, the cgroup's
memory won't be reclaimed unless memory can be reclaimed
- from unprotected cgroups.
+ from unprotected cgroups. Above the effective low boundary (or
+ effective min boundary if it is higher), pages are reclaimed
+ proportionally to the overage, reducing reclaim pressure for
+ smaller overages.
Effective low boundary is limited by memory.low values of
all ancestor cgroups. If there is memory.low overcommitment
@@ -2482,8 +2488,10 @@ system performance due to overreclaim, to the point where the feature
becomes self-defeating.
The memory.low boundary on the other hand is a top-down allocated
-reserve. A cgroup enjoys reclaim protection when it's within its low,
-which makes delegation of subtrees possible.
+reserve. A cgroup enjoys reclaim protection when it's within its
+effective low, which makes delegation of subtrees possible. It also
+enjoys having reclaim pressure proportional to its overage when
+above its effective low.
The original high boundary, the hard limit, is defined as a strict
limit that can not budge, even if the OOM killer has to be called.
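As a rough illustration of the proportional reclaim described above (a
hypothetical sketch, not the kernel's actual reclaim code, which lives in
mm/vmscan.c; the function name and interface are invented), scan pressure
can be scaled by overage like so:

.. code-block:: c

    /* Hypothetical sketch: scale reclaim scan pressure by how far a
     * cgroup's usage exceeds its effective protection. */
    static unsigned long scale_scan_by_overage(unsigned long scan,
                                               unsigned long usage,
                                               unsigned long protection)
    {
            if (usage <= protection)
                    return 0;       /* fully protected: no pressure */

            /* pressure grows with the fraction of usage in overage */
            return scan * (usage - protection) / usage;
    }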
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index c7ac2f3ac99f..a84a83f8881e 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5302,6 +5302,10 @@
the unplug protocol
never -- do not unplug even if version check succeeds
+ xen_legacy_crash [X86,XEN]
+ Crash from Xen panic notifier, without executing late
+ panic() code such as dump handlers.
+
xen_nopvspin [X86,XEN]
Disables the ticketlock slowpath using Xen PV
optimizations.
diff --git a/Documentation/arm64/memory.rst b/Documentation/arm64/memory.rst
index b040909e45f8..02e02175e6f5 100644
--- a/Documentation/arm64/memory.rst
+++ b/Documentation/arm64/memory.rst
@@ -154,11 +154,18 @@ return virtual addresses to userspace from a 48-bit range.
Software can "opt-in" to receiving VAs from a 52-bit space by
specifying an mmap hint parameter that is larger than 48-bit.
+
For example:
- maybe_high_address = mmap(~0UL, size, prot, flags,...);
+
+.. code-block:: c
+
+ maybe_high_address = mmap(~0UL, size, prot, flags,...);
It is also possible to build a debug kernel that returns addresses
from a 52-bit space by enabling the following kernel config options:
+
+.. code-block:: sh
+
CONFIG_EXPERT=y && CONFIG_ARM64_FORCE_52BIT=y
Note that this option is only intended for debugging applications
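A minimal, self-contained version of the opt-in shown above might look as
follows (the mapping size and the lack of error recovery are arbitrary
choices for illustration):

.. code-block:: c

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            /* A hint above 48 bits opts this mapping in to the
             * 52-bit VA space on kernels that support it. */
            void *p = mmap((void *)~0UL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }
            printf("mapped at %p\n", p);
            return 0;
    }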
diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
index 17ea3fecddaa..ab7ed2fd072f 100644
--- a/Documentation/arm64/silicon-errata.rst
+++ b/Documentation/arm64/silicon-errata.rst
@@ -107,6 +107,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| Cavium | ThunderX2 SMMUv3| #126 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
+| Cavium | ThunderX2 Core | #219 | CAVIUM_TX2_ERRATUM_219 |
++----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| Freescale/NXP | LS2080A/LS1043A | A-008585 | FSL_ERRATUM_A008585 |
+----------------+-----------------+-----------------+-----------------------------+
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index fa16a0538dcb..ab0eae1c153a 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -38,6 +38,7 @@ Core utilities
protection-keys
../RCU/index
gcc-plugins
+ symbol-namespaces
Interfaces for kernel debugging
diff --git a/Documentation/core-api/memory-allocation.rst b/Documentation/core-api/memory-allocation.rst
index 7744aa3bf2e0..939e3dfc86e9 100644
--- a/Documentation/core-api/memory-allocation.rst
+++ b/Documentation/core-api/memory-allocation.rst
@@ -98,6 +98,10 @@ limited. The actual limit depends on the hardware and the kernel
configuration, but it is a good practice to use `kmalloc` for objects
smaller than page size.
+The address of a chunk allocated with `kmalloc` is aligned to at least
+ARCH_KMALLOC_MINALIGN bytes. For sizes which are a power of two, the
+alignment is also guaranteed to be at least the respective size.
+
For large allocations you can use :c:func:`vmalloc` and
:c:func:`vzalloc`, or directly request pages from the page
allocator. The memory allocated by `vmalloc` and related functions is
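A short in-kernel sketch of the power-of-two guarantee stated above
(illustrative only, assuming module context; real code need not verify the
alignment):

.. code-block:: c

    #include <linux/bug.h>
    #include <linux/slab.h>

    static int kmalloc_alignment_demo(void)
    {
            /* 512 is a power of two, so the returned pointer is
             * guaranteed to be at least 512-byte aligned. */
            void *buf = kmalloc(512, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;
            WARN_ON((unsigned long)buf & (512 - 1));
            kfree(buf);
            return 0;
    }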
diff --git a/Documentation/kbuild/namespaces.rst b/Documentation/core-api/symbol-namespaces.rst
index 982ed7b568ac..982ed7b568ac 100644
--- a/Documentation/kbuild/namespaces.rst
+++ b/Documentation/core-api/symbol-namespaces.rst
diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst
index b72d07d70239..525296121d89 100644
--- a/Documentation/dev-tools/kasan.rst
+++ b/Documentation/dev-tools/kasan.rst
@@ -41,6 +41,9 @@ smaller binary while the latter is 1.1 - 2 times faster.
Both KASAN modes work with both SLUB and SLAB memory allocators.
For better bug detection and nicer reporting, enable CONFIG_STACKTRACE.
+To augment reports with the last allocation and freeing stack traces of the
+physical page, it is recommended to also enable CONFIG_PAGE_OWNER and boot
+with page_owner=on.
+
To disable instrumentation for specific files or directories, add a line
similar to the following to the respective kernel Makefile:
diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst
index 25604904fa6e..ecdfdc9d4b03 100644
--- a/Documentation/dev-tools/kselftest.rst
+++ b/Documentation/dev-tools/kselftest.rst
@@ -89,6 +89,22 @@ To build, save output files in a separate directory with KBUILD_OUTPUT ::
$ export KBUILD_OUTPUT=/tmp/kselftest; make TARGETS="size timers" kselftest
+Additionally, you can use the "SKIP_TARGETS" variable on the make command
+line to specify one or more targets to exclude from the TARGETS list.
+
+To run all tests but a single subsystem::
+
+ $ make -C tools/testing/selftests SKIP_TARGETS=ptrace run_tests
+
+You can specify multiple tests to skip::
+
+ $ make SKIP_TARGETS="size timers" kselftest
+
+You can also specify a restricted list of tests to run together with a
+dedicated skiplist::
+
+ $ make TARGETS="bpf breakpoints size timers" SKIP_TARGETS=bpf kselftest
+
See the top-level tools/testing/selftests/Makefile for the list of all
possible targets.
diff --git a/Documentation/devicetree/bindings/display/arm,malidp.txt b/Documentation/devicetree/bindings/display/arm,malidp.txt
index 2f7870983ef1..7a97a2b48c2a 100644
--- a/Documentation/devicetree/bindings/display/arm,malidp.txt
+++ b/Documentation/devicetree/bindings/display/arm,malidp.txt
@@ -37,6 +37,8 @@ Optional properties:
Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt)
to be used for the framebuffer; if not present, the framebuffer may
be located anywhere in memory.
+ - arm,malidp-arqos-high-level: a u32 value describing the ARQoS
+ levels of DP500's QoS signaling.
Example:
@@ -54,6 +56,7 @@ Example:
clocks = <&oscclk2>, <&fpgaosc0>, <&fpgaosc1>, <&fpgaosc1>;
clock-names = "pxlclk", "mclk", "aclk", "pclk";
arm,malidp-output-port-lines = /bits/ 8 <8 8 8>;
+ arm,malidp-arqos-high-level = <0xd000d000>;
port {
dp0_output: endpoint {
remote-endpoint = <&tda998x_2_input>;
diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt b/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
index db680413e89c..819f3e31013c 100644
--- a/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
+++ b/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
@@ -13,6 +13,7 @@ Required properties:
- compatible : Shall contain one or more of
- "renesas,r8a774a1-hdmi" for R8A774A1 (RZ/G2M) compatible HDMI TX
+ - "renesas,r8a774b1-hdmi" for R8A774B1 (RZ/G2N) compatible HDMI TX
- "renesas,r8a7795-hdmi" for R8A7795 (R-Car H3) compatible HDMI TX
- "renesas,r8a7796-hdmi" for R8A7796 (R-Car M3-W) compatible HDMI TX
- "renesas,r8a77965-hdmi" for R8A77965 (R-Car M3-N) compatible HDMI TX
diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
index c6a196d0b075..c62ce2494ed9 100644
--- a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
+++ b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
@@ -10,6 +10,7 @@ Required properties:
- "renesas,r8a7743-lvds" for R8A7743 (RZ/G1M) compatible LVDS encoders
- "renesas,r8a7744-lvds" for R8A7744 (RZ/G1N) compatible LVDS encoders
- "renesas,r8a774a1-lvds" for R8A774A1 (RZ/G2M) compatible LVDS encoders
+ - "renesas,r8a774b1-lvds" for R8A774B1 (RZ/G2N) compatible LVDS encoders
- "renesas,r8a774c0-lvds" for R8A774C0 (RZ/G2E) compatible LVDS encoders
- "renesas,r8a7790-lvds" for R8A7790 (R-Car H2) compatible LVDS encoders
- "renesas,r8a7791-lvds" for R8A7791 (R-Car M2-W) compatible LVDS encoders
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
index 8469de510001..b91e709db7a4 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
@@ -27,19 +27,22 @@ Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt.
Required properties (all function blocks):
- compatible: "mediatek,<chip>-disp-<function>", one of
- "mediatek,<chip>-disp-ovl" - overlay (4 layers, blending, csc)
- "mediatek,<chip>-disp-rdma" - read DMA / line buffer
- "mediatek,<chip>-disp-wdma" - write DMA
- "mediatek,<chip>-disp-color" - color processor
- "mediatek,<chip>-disp-aal" - adaptive ambient light controller
- "mediatek,<chip>-disp-gamma" - gamma correction
- "mediatek,<chip>-disp-merge" - merge streams from two RDMA sources
- "mediatek,<chip>-disp-split" - split stream to two encoders
- "mediatek,<chip>-disp-ufoe" - data compression engine
- "mediatek,<chip>-dsi" - DSI controller, see mediatek,dsi.txt
- "mediatek,<chip>-dpi" - DPI controller, see mediatek,dpi.txt
- "mediatek,<chip>-disp-mutex" - display mutex
- "mediatek,<chip>-disp-od" - overdrive
+ "mediatek,<chip>-disp-ovl" - overlay (4 layers, blending, csc)
+ "mediatek,<chip>-disp-ovl-2l" - overlay (2 layers, blending, csc)
+ "mediatek,<chip>-disp-rdma" - read DMA / line buffer
+ "mediatek,<chip>-disp-wdma" - write DMA
+ "mediatek,<chip>-disp-ccorr" - color correction
+ "mediatek,<chip>-disp-color" - color processor
+ "mediatek,<chip>-disp-dither" - dither
+ "mediatek,<chip>-disp-aal" - adaptive ambient light controller
+ "mediatek,<chip>-disp-gamma" - gamma correction
+ "mediatek,<chip>-disp-merge" - merge streams from two RDMA sources
+ "mediatek,<chip>-disp-split" - split stream to two encoders
+ "mediatek,<chip>-disp-ufoe" - data compression engine
+ "mediatek,<chip>-dsi" - DSI controller, see mediatek,dsi.txt
+ "mediatek,<chip>-dpi" - DPI controller, see mediatek,dpi.txt
+ "mediatek,<chip>-disp-mutex" - display mutex
+ "mediatek,<chip>-disp-od" - overdrive
the supported chips are mt2701, mt2712 and mt8173.
- reg: Physical base address and length of the function block register space
- interrupts: The interrupt signal from the function block (required, except for
@@ -49,6 +52,7 @@ Required properties (all function blocks):
For most function blocks this is just a single clock input. Only the DSI and
DPI controller nodes have multiple clock inputs. These are documented in
mediatek,dsi.txt and mediatek,dpi.txt, respectively.
+ An exception is that the mt8183 mutex is always free-running and has no clocks property.
Required properties (DMA function blocks):
- compatible: Should be one of
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
index fadf327c7cdf..a19a6cc375ed 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
@@ -7,7 +7,7 @@ channel output.
Required properties:
- compatible: "mediatek,<chip>-dsi"
- the supported chips are mt2701 and mt8173.
+ the supported chips are mt2701, mt8173 and mt8183.
- reg: Physical base address and length of the controller's registers
- interrupts: The interrupt signal from the function block.
- clocks: device clocks
@@ -26,7 +26,7 @@ The MIPI TX configuration module controls the MIPI D-PHY.
Required properties:
- compatible: "mediatek,<chip>-mipi-tx"
- the supported chips are mt2701 and mt8173.
+ the supported chips are mt2701, mt8173 and mt8183.
- reg: Physical base address and length of the controller's registers
- clocks: PLL reference clock
- clock-output-names: name of the output clock line to the DSI encoder
diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index c97dfacad281..17cb2771364b 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -8,6 +8,7 @@ Required Properties:
- "renesas,du-r8a7745" for R8A7745 (RZ/G1E) compatible DU
- "renesas,du-r8a77470" for R8A77470 (RZ/G1C) compatible DU
- "renesas,du-r8a774a1" for R8A774A1 (RZ/G2M) compatible DU
+ - "renesas,du-r8a774b1" for R8A774B1 (RZ/G2N) compatible DU
- "renesas,du-r8a774c0" for R8A774C0 (RZ/G2E) compatible DU
- "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
- "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
@@ -60,6 +61,7 @@ corresponding to each DU output.
R8A7745 (RZ/G1E) DPAD 0 DPAD 1 - -
R8A77470 (RZ/G1C) DPAD 0 DPAD 1 LVDS 0 -
R8A774A1 (RZ/G2M) DPAD 0 HDMI 0 LVDS 0 -
+ R8A774B1 (RZ/G2N) DPAD 0 HDMI 0 LVDS 0 -
R8A774C0 (RZ/G2E) DPAD 0 LVDS 0 LVDS 1 -
R8A7779 (R-Car H1) DPAD 0 DPAD 1 - -
R8A7790 (R-Car H2) DPAD 0 LVDS 0 LVDS 1 -
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
index 4f58c5a2d195..8b3a5f514205 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
@@ -20,6 +20,10 @@ Required properties:
"rockchip,rk3228-vop";
"rockchip,rk3328-vop";
+- reg: Must contain one entry corresponding to the base address and length
+ of the register space. Can optionally contain a second entry
+ corresponding to the CRTC gamma LUT address.
+
- interrupts: should contain a list of all VOP IP block interrupts in the
order: VSYNC, LCD_SYSTEM. The interrupt specifier
format depends on the interrupt controller used.
@@ -48,7 +52,7 @@ Example:
SoC specific DT entry:
vopb: vopb@ff930000 {
compatible = "rockchip,rk3288-vop";
- reg = <0xff930000 0x19c>;
+ reg = <0x0 0xff930000 0x0 0x19c>, <0x0 0xff931000 0x0 0x1000>;
interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru ACLK_VOP0>, <&cru DCLK_VOP0>, <&cru HCLK_VOP0>;
clock-names = "aclk_vop", "dclk_vop", "hclk_vop";
diff --git a/Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt b/Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt
index f4c5d34c4111..7079d44bf3ba 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt
@@ -1,8 +1,11 @@
* Advanced Interrupt Controller (AIC)
Required properties:
-- compatible: Should be "atmel,<chip>-aic"
- <chip> can be "at91rm9200", "sama5d2", "sama5d3" or "sama5d4"
+- compatible: Should be:
+ - "atmel,<chip>-aic" where <chip> can be "at91rm9200", "sama5d2",
+ "sama5d3" or "sama5d4"
+ - "microchip,<chip>-aic" where <chip> can be "sam9x60"
+
- interrupt-controller: Identifies the node as an interrupt controller.
- #interrupt-cells: The number of cells to define the interrupts. It should be 3.
The first cell is the IRQ number (aka "Peripheral IDentifier" on datasheet).
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
index dd63151dc8b6..b143d9a21b2d 100644
--- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
+++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
@@ -26,6 +26,8 @@ Required properties:
- "renesas,hscif-r8a77470" for R8A77470 (RZ/G1C) HSCIF compatible UART.
- "renesas,scif-r8a774a1" for R8A774A1 (RZ/G2M) SCIF compatible UART.
- "renesas,hscif-r8a774a1" for R8A774A1 (RZ/G2M) HSCIF compatible UART.
+ - "renesas,scif-r8a774b1" for R8A774B1 (RZ/G2N) SCIF compatible UART.
+ - "renesas,hscif-r8a774b1" for R8A774B1 (RZ/G2N) HSCIF compatible UART.
- "renesas,scif-r8a774c0" for R8A774C0 (RZ/G2E) SCIF compatible UART.
- "renesas,hscif-r8a774c0" for R8A774C0 (RZ/G2E) HSCIF compatible UART.
- "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.
diff --git a/Documentation/devicetree/bindings/usb/amlogic,dwc3.txt b/Documentation/devicetree/bindings/usb/amlogic,dwc3.txt
index b9f04e617eb7..6ffb09be7a76 100644
--- a/Documentation/devicetree/bindings/usb/amlogic,dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/amlogic,dwc3.txt
@@ -85,8 +85,8 @@ A child node must exist to represent the core DWC2 IP block. The name of
the node is not important. The content of the node is defined in dwc2.txt.
PHY documentation is provided in the following places:
-- Documentation/devicetree/bindings/phy/meson-g12a-usb2-phy.txt
-- Documentation/devicetree/bindings/phy/meson-g12a-usb3-pcie-phy.txt
+- Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml
+- Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml
Example device nodes:
usb: usb@ffe09000 {
diff --git a/Documentation/devicetree/bindings/usb/generic-ehci.yaml b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
index 059f6ef1ad4a..1ca64c85191a 100644
--- a/Documentation/devicetree/bindings/usb/generic-ehci.yaml
+++ b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
@@ -63,7 +63,11 @@ properties:
description:
Set this flag to force EHCI reset after resume.
- phys: true
+ phys:
+ description: PHY specifier for the USB PHY
+
+ phy-names:
+ const: usb
required:
- compatible
@@ -89,6 +93,7 @@ examples:
interrupts = <39>;
clocks = <&ahb_gates 1>;
phys = <&usbphy 1>;
+ phy-names = "usb";
};
...
diff --git a/Documentation/devicetree/bindings/usb/generic-ohci.yaml b/Documentation/devicetree/bindings/usb/generic-ohci.yaml
index da5a14becbe5..bcffec1f1341 100644
--- a/Documentation/devicetree/bindings/usb/generic-ohci.yaml
+++ b/Documentation/devicetree/bindings/usb/generic-ohci.yaml
@@ -67,7 +67,11 @@ properties:
description:
Overrides the detected port count
- phys: true
+ phys:
+ description: PHY specifier for the USB PHY
+
+ phy-names:
+ const: usb
required:
- compatible
@@ -84,6 +88,7 @@ examples:
interrupts = <64>;
clocks = <&usb_clk 6>, <&ahb_gates 2>;
phys = <&usbphy 1>;
+ phy-names = "usb";
};
...
diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
index f3e4acecabe8..42d8814f903a 100644
--- a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
@@ -33,7 +33,7 @@ Required properties:
"dma_ck": dma_bus clock for data transfer by DMA,
"xhci_ck": controller clock
- - phys : see usb-hcd.txt in the current directory
+ - phys : see usb-hcd.yaml in the current directory
Optional properties:
- wakeup-source : enable USB remote wakeup;
@@ -53,7 +53,7 @@ Optional properties:
See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
- imod-interval-ns: default interrupt moderation interval is 5000ns
-additionally the properties from usb-hcd.txt (in the current directory) are
+additionally the properties from usb-hcd.yaml (in the current directory) are
supported.
Example:
diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt b/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
index b9af7f5ee91d..e0ae6096f7ac 100644
--- a/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
+++ b/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
@@ -17,7 +17,7 @@ Required properties:
- clock-names : must contain "sys_ck" for clock of controller,
the following clocks are optional:
"ref_ck", "mcu_ck" and "dma_ck";
- - phys : see usb-hcd.txt in the current directory
+ - phys : see usb-hcd.yaml in the current directory
- dr_mode : should be one of "host", "peripheral" or "otg",
refer to usb/generic.txt
@@ -60,7 +60,7 @@ Optional properties:
- mediatek,u3p-dis-msk : mask to disable u3ports, bit0 for u3port0,
bit1 for u3port1, ... etc;
-additionally the properties from usb-hcd.txt (in the current directory) are
+additionally the properties from usb-hcd.yaml (in the current directory) are
supported.
Sub-nodes:
diff --git a/Documentation/devicetree/bindings/usb/usb-hcd.yaml b/Documentation/devicetree/bindings/usb/usb-hcd.yaml
index 9c8c56d3a792..7263b7f2b510 100644
--- a/Documentation/devicetree/bindings/usb/usb-hcd.yaml
+++ b/Documentation/devicetree/bindings/usb/usb-hcd.yaml
@@ -18,8 +18,13 @@ properties:
description:
List of all the USB PHYs on this HCD
+ phy-names:
+ description:
+ Name specifier for the USB PHY
+
examples:
- |
usb {
phys = <&usb2_phy1>, <&usb3_phy1>;
+ phy-names = "usb";
};
diff --git a/Documentation/devicetree/bindings/usb/usb-uhci.txt b/Documentation/devicetree/bindings/usb/usb-uhci.txt
index cc2e6f7d602e..d1702eb2c8bd 100644
--- a/Documentation/devicetree/bindings/usb/usb-uhci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-uhci.txt
@@ -6,7 +6,7 @@ Required properties:
- reg : Should contain 1 register ranges(address and length)
- interrupts : UHCI controller interrupt
-additionally the properties from usb-hcd.txt (in the current directory) are
+additionally the properties from usb-hcd.yaml (in the current directory) are
supported.
Example:
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
index 97400e8f8605..b49b819571f9 100644
--- a/Documentation/devicetree/bindings/usb/usb-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
@@ -41,9 +41,9 @@ Optional properties:
- usb3-lpm-capable: determines if platform is USB3 LPM capable
- quirk-broken-port-ped: set if the controller has broken port disable mechanism
- imod-interval-ns: default interrupt moderation interval is 5000ns
- - phys : see usb-hcd.txt in the current directory
+ - phys : see usb-hcd.yaml in the current directory
-additionally the properties from usb-hcd.txt (in the current directory) are
+additionally the properties from usb-hcd.yaml (in the current directory) are
supported.
diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst
index b541e97c7ab1..c78db28519f7 100644
--- a/Documentation/driver-api/dma-buf.rst
+++ b/Documentation/driver-api/dma-buf.rst
@@ -118,13 +118,13 @@ Kernel Functions and Structures Reference
Reservation Objects
-------------------
-.. kernel-doc:: drivers/dma-buf/reservation.c
+.. kernel-doc:: drivers/dma-buf/dma-resv.c
:doc: Reservation Object Overview
-.. kernel-doc:: drivers/dma-buf/reservation.c
+.. kernel-doc:: drivers/dma-buf/dma-resv.c
:export:
-.. kernel-doc:: include/linux/reservation.h
+.. kernel-doc:: include/linux/dma-resv.h
:internal:
DMA Fences
diff --git a/Documentation/gpu/amdgpu.rst b/Documentation/gpu/amdgpu.rst
index 5acdd1842ea2..5b9eaf23558e 100644
--- a/Documentation/gpu/amdgpu.rst
+++ b/Documentation/gpu/amdgpu.rst
@@ -79,12 +79,32 @@ AMDGPU XGMI Support
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
:internal:
-AMDGPU RAS debugfs control interface
-====================================
+AMDGPU RAS Support
+==================
+
+RAS debugfs/sysfs Control and Error Injection Interfaces
+--------------------------------------------------------
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
:doc: AMDGPU RAS debugfs control interface
+RAS Error Count sysfs Interface
+-------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+ :doc: AMDGPU RAS sysfs Error Count Interface
+
+RAS EEPROM debugfs Interface
+----------------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+ :doc: AMDGPU RAS debugfs EEPROM table reset interface
+
+RAS VRAM Bad Pages sysfs Interface
+----------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+ :doc: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
:internal:
@@ -130,11 +150,11 @@ pp_od_clk_voltage
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
:doc: pp_od_clk_voltage
-pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+pp_dpm_*
+~~~~~~~~
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
- :doc: pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie
+ :doc: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
pp_power_profile_mode
~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index 3868008db8a9..9668a7fe2408 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -77,9 +77,6 @@ Atomic State Reset and Initialization
Atomic State Helper Reference
-----------------------------
-.. kernel-doc:: include/drm/drm_atomic_state_helper.h
- :internal:
-
.. kernel-doc:: drivers/gpu/drm/drm_atomic_state_helper.c
:export:
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 8dc147c93c9c..6792fa9b6b6b 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -7,6 +7,22 @@ TODO list
This section contains a list of smaller janitorial tasks in the kernel DRM
graphics subsystem useful as newbie projects. Or for slow rainy days.
+Difficulty
+----------
+
+To make it easier, tasks are categorized into different levels:
+
+Starter: Good tasks to get started with the DRM subsystem.
+
+Intermediate: Tasks which need some experience working in the DRM
+subsystem, or some specific GPU/display graphics knowledge. For debugging
+issues it's good to have the relevant hardware (or a virtual driver set up)
+available for testing.
+
+Advanced: Tricky tasks that need fairly good understanding of the DRM subsystem
+and graphics topics. Generally need the relevant hardware for development and
+testing.
+
Subsystem-wide refactorings
===========================
@@ -20,6 +36,8 @@ implementations), and then remove it.
Contact: Daniel Vetter, respective driver maintainers
+Level: Intermediate
+
Convert existing KMS drivers to atomic modesetting
--------------------------------------------------
@@ -38,6 +56,8 @@ do by directly using the new atomic helper driver callbacks.
Contact: Daniel Vetter, respective driver maintainers
+Level: Advanced
+
Clean up the clipped coordination confusion around planes
---------------------------------------------------------
@@ -50,6 +70,8 @@ helpers.
Contact: Ville Syrjälä, Daniel Vetter, driver maintainers
+Level: Advanced
+
Convert early atomic drivers to async commit helpers
----------------------------------------------------
@@ -63,6 +85,8 @@ events for atomic commits correctly. But fixing these bugs is good anyway.
Contact: Daniel Vetter, respective driver maintainers
+Level: Advanced
+
Fallout from atomic KMS
-----------------------
@@ -91,6 +115,8 @@ interfaces to fix these issues:
Contact: Daniel Vetter
+Level: Intermediate
+
Get rid of dev->struct_mutex from GEM drivers
---------------------------------------------
@@ -114,6 +140,8 @@ fine-grained per-buffer object and per-context lockings scheme. Currently only t
Contact: Daniel Vetter, respective driver maintainers
+Level: Advanced
+
Convert instances of dev_info/dev_err/dev_warn to their DRM_DEV_* equivalent
----------------------------------------------------------------------------
@@ -129,6 +157,8 @@ are better.
Contact: Sean Paul, Maintainer of the driver you plan to convert
+Level: Starter
+
Convert drivers to use simple modeset suspend/resume
----------------------------------------------------
@@ -139,6 +169,8 @@ of the atomic suspend/resume code in older atomic modeset drivers.
Contact: Maintainer of the driver you plan to convert
+Level: Intermediate
+
Convert drivers to use drm_fb_helper_fbdev_setup/teardown()
-----------------------------------------------------------
@@ -157,6 +189,8 @@ probably use drm_fb_helper_fbdev_teardown().
Contact: Maintainer of the driver you plan to convert
+Level: Intermediate
+
Clean up mmap forwarding
------------------------
@@ -166,14 +200,16 @@ There's drm_gem_prime_mmap() for this now, but still needs to be rolled out.
Contact: Daniel Vetter
+Level: Intermediate
+
Generic fbdev defio support
---------------------------
The defio support code in the fbdev core has some very specific requirements,
-which means drivers need to have a special framebuffer for fbdev. Which prevents
-us from using the generic fbdev emulation code everywhere. The main issue is
-that it uses some fields in struct page itself, which breaks shmem gem objects
-(and other things).
+which means drivers need to have a special framebuffer for fbdev. The main
+issue is that it uses some fields in struct page itself, which breaks shmem
+gem objects (and other things). To support defio, affected drivers require
+the use of a shadow buffer, which may add CPU and memory overhead.
Possible solution would be to write our own defio mmap code in the drm fbdev
emulation. It would need to fully wrap the existing mmap ops, forwarding
@@ -196,6 +232,8 @@ Might be good to also have some igt testcases for this.
Contact: Daniel Vetter, Noralf Tronnes
+Level: Advanced
+
idr_init_base()
---------------
@@ -206,6 +244,8 @@ efficient.
Contact: Daniel Vetter
+Level: Starter
+
struct drm_gem_object_funcs
---------------------------
@@ -216,6 +256,8 @@ We also need a 2nd version of the CMA define that doesn't require the
vmapping to be present (different hook for prime importing). Plus this needs to
be rolled out to all drivers using their own implementations, too.
+Level: Intermediate
+
Use DRM_MODESET_LOCK_ALL_* helpers instead of boilerplate
---------------------------------------------------------
@@ -231,6 +273,8 @@ As a reference, take a look at the conversions already completed in drm core.
Contact: Sean Paul, respective driver maintainers
+Level: Starter
+
Rename CMA helpers to DMA helpers
---------------------------------
@@ -241,6 +285,9 @@ no one knows what that means) since underneath they just use dma_alloc_coherent.
Contact: Laurent Pinchart, Daniel Vetter
+Level: Intermediate (mostly because it is a huge task without good partial
+milestones, not technically itself that challenging)
+
Convert direct mode.vrefresh accesses to use drm_mode_vrefresh()
----------------------------------------------------------------
@@ -259,6 +306,8 @@ drm_display_mode to avoid future use.
Contact: Sean Paul
+Level: Starter
+
Remove drm_display_mode.hsync
-----------------------------
@@ -269,6 +318,8 @@ it to use drm_mode_hsync() instead.
Contact: Sean Paul
+Level: Starter
+
drm_fb_helper tasks
-------------------
@@ -284,6 +335,8 @@ drm_fb_helper tasks
removed: drm_fb_helper_single_add_all_connectors(),
drm_fb_helper_add_one_connector() and drm_fb_helper_remove_one_connector().
+Level: Intermediate
+
connector register/unregister fixes
-----------------------------------
@@ -296,21 +349,11 @@ connector register/unregister fixes
drm_dp_aux_init, and moving the actual registering into a late_register
callback as recommended in the kerneldoc.
+Level: Intermediate
+
Core refactorings
=================
-Clean up the DRM header mess
-----------------------------
-
-The DRM subsystem originally had only one huge global header, ``drmP.h``. This
-is now split up, but many source files still include it. The remaining part of
-the cleanup work here is to replace any ``#include <drm/drmP.h>`` by only the
-headers needed (and fixing up any missing pre-declarations in the headers).
-
-In the end no .c file should need to include ``drmP.h`` anymore.
-
-Contact: Daniel Vetter
-
Make panic handling work
------------------------
@@ -350,6 +393,8 @@ This is a really varied tasks with lots of little bits and pieces:
Contact: Daniel Vetter
+Level: Advanced
+
Clean up the debugfs support
----------------------------
@@ -379,6 +424,8 @@ There's a bunch of issues with it:
Contact: Daniel Vetter
+Level: Intermediate
+
KMS cleanups
------------
@@ -394,6 +441,8 @@ Some of these date from the very introduction of KMS in 2008 ...
end, for which we could add drm_*_cleanup_kfree(). And then there's the (for
historical reasons) misnamed drm_primary_helper_destroy() function.
+Level: Intermediate
+
Better Testing
==============
@@ -402,6 +451,8 @@ Enable trinity for DRM
And fix up the fallout. Should be really interesting ...
+Level: Advanced
+
Make KMS tests in i-g-t generic
-------------------------------
@@ -415,6 +466,8 @@ converting things over. For modeset tests we also first need a bit of
infrastructure to use dumb buffers for untiled buffers, to be able to run all
the non-i915 specific modeset tests.
+Level: Advanced
+
Extend virtual test driver (VKMS)
---------------------------------
@@ -424,6 +477,8 @@ fit the available time.
Contact: Daniel Vetter
+Level: See details
+
Backlight Refactoring
---------------------
@@ -437,6 +492,8 @@ Plan to fix this:
Contact: Daniel Vetter
+Level: Intermediate
+
Driver Specific
===============
@@ -450,13 +507,6 @@ See drivers/gpu/drm/amd/display/TODO for tasks.
Contact: Harry Wentland, Alex Deucher
-i915
-----
-
-- Our early/late pm callbacks could be removed in favour of using
- device_link_add to model the dependency between i915 and snd_had. See
- https://dri.freedesktop.org/docs/drm/driver-api/device_link.html
-
Bootsplash
==========
@@ -472,5 +522,36 @@ for fbdev.
Contact: Sam Ravnborg
+Level: Advanced
+
Outside DRM
===========
+
+Convert fbdev drivers to DRM
+----------------------------
+
+There are plenty of fbdev drivers for older hardware. Some hardware has
+become obsolete, but some still provides good(-enough) framebuffers. The
+drivers that are still useful should be converted to DRM and afterwards
+removed from fbdev.
+
+Very simple fbdev drivers can best be converted by starting with a new
+DRM driver. Simple KMS helpers and SHMEM should be able to handle any
+existing hardware. The new driver's callback functions are filled from
+existing fbdev code.
+
+More complex fbdev drivers can be refactored step-by-step into a DRM
+driver with the help of the DRM fbconv helpers. [1] These helpers provide
+the transition layer between the DRM core infrastructure and the fbdev
+driver interface. Create a new DRM driver on top of the fbconv helpers,
+copy over the fbdev driver, and hook it up to the DRM code. Examples for
+several fbdev drivers are available at [1], and a tutorial of this process is
+available at [2]. The result is a primitive DRM driver that can run X11
+and Weston.
+
+ - [1] https://gitlab.freedesktop.org/tzimmermann/linux/tree/fbconv
+ - [2] https://gitlab.freedesktop.org/tzimmermann/linux/blob/fbconv/drivers/gpu/drm/drm_fbconv_helper.c
+
+Contact: Thomas Zimmermann <tzimmermann@suse.de>
+
+Level: Advanced
diff --git a/Documentation/hwmon/index.rst b/Documentation/hwmon/index.rst
index 8147c3f218bf..230ad59b462b 100644
--- a/Documentation/hwmon/index.rst
+++ b/Documentation/hwmon/index.rst
@@ -7,6 +7,7 @@ Linux Hardware Monitoring
hwmon-kernel-api
pmbus-core
+ inspur-ipsps1
submitting-patches
sysfs-interface
userspace-tools
diff --git a/Documentation/hwmon/inspur-ipsps1.rst b/Documentation/hwmon/inspur-ipsps1.rst
index 2b871ae3448f..292c0c26bdd1 100644
--- a/Documentation/hwmon/inspur-ipsps1.rst
+++ b/Documentation/hwmon/inspur-ipsps1.rst
@@ -1,5 +1,5 @@
Kernel driver inspur-ipsps1
-=======================
+===========================
Supported chips:
diff --git a/Documentation/hwmon/k10temp.rst b/Documentation/hwmon/k10temp.rst
index 12a86ba17de9..4451d59b9425 100644
--- a/Documentation/hwmon/k10temp.rst
+++ b/Documentation/hwmon/k10temp.rst
@@ -21,10 +21,17 @@ Supported chips:
* AMD Family 14h processors: "Brazos" (C/E/G/Z-Series)
-* AMD Family 15h processors: "Bulldozer" (FX-Series), "Trinity", "Kaveri", "Carrizo"
+* AMD Family 15h processors: "Bulldozer" (FX-Series), "Trinity", "Kaveri",
+ "Carrizo", "Stoney Ridge", "Bristol Ridge"
* AMD Family 16h processors: "Kabini", "Mullins"
+* AMD Family 17h processors: "Zen", "Zen 2"
+
+* AMD Family 18h processors: "Hygon Dhyana"
+
+* AMD Family 19h processors: "Zen 3"
+
Prefix: 'k10temp'
Addresses scanned: PCI space
@@ -110,3 +117,12 @@ The maximum value for Tctl is available in the file temp1_max.
If the BIOS has enabled hardware temperature control, the threshold at
which the processor will throttle itself to avoid damage is available in
temp1_crit and temp1_crit_hyst.
+
+On some AMD CPUs, there is a difference between the die temperature (Tdie) and
+the reported temperature (Tctl). Tdie is the real measured temperature, and
+Tctl is used for fan control. While Tctl is always available as temp1_input,
+the driver exports the Tdie temperature as temp2_input for those CPUs which
+support it.
+
+Models from the 17h family report a relative temperature; the driver aims to
+compensate and report the real temperature.
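A minimal userspace sketch of reading the sysfs interface described above
(hwmon0 is an assumption; the index varies per system):

.. code-block:: c

    #include <stdio.h>

    int main(void)
    {
            /* hwmon0 is an assumption; match the device whose name
             * attribute reads "k10temp" before relying on the index. */
            FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
            long mdeg;

            if (!f) {
                    perror("temp1_input");
                    return 1;
            }
            if (fscanf(f, "%ld", &mdeg) != 1) {
                    fclose(f);
                    return 1;
            }
            fclose(f);
            /* hwmon values are in millidegrees Celsius */
            printf("Tctl: %ld.%03ld C\n", mdeg / 1000, mdeg % 1000);
            return 0;
    }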
diff --git a/Documentation/networking/device_drivers/pensando/ionic.rst b/Documentation/networking/device_drivers/pensando/ionic.rst
index 67b6839d516b..13935896bee6 100644
--- a/Documentation/networking/device_drivers/pensando/ionic.rst
+++ b/Documentation/networking/device_drivers/pensando/ionic.rst
@@ -36,8 +36,10 @@ Support
=======
For general Linux networking support, please use the netdev mailing
list, which is monitored by Pensando personnel::
+
netdev@vger.kernel.org
For more specific support needs, please use the Pensando driver support
email::
- drivers@pensando.io
+
+ drivers@pensando.io
diff --git a/Documentation/networking/net_dim.txt b/Documentation/networking/net_dim.txt
index 9cb31c5e2dcd..9bdb7d5a3ba3 100644
--- a/Documentation/networking/net_dim.txt
+++ b/Documentation/networking/net_dim.txt
@@ -92,16 +92,16 @@ under some conditions.
Part III: Registering a Network Device to DIM
==============================================
-Net DIM API exposes the main function net_dim(struct net_dim *dim,
-struct net_dim_sample end_sample). This function is the entry point to the Net
+The Net DIM API exposes the main function net_dim(struct dim *dim,
+struct dim_sample end_sample). This function is the entry point to the Net
DIM algorithm and has to be called every time the driver would like to check if
it should change interrupt moderation parameters. The driver should provide two
-data structures: struct net_dim and struct net_dim_sample. Struct net_dim
+data structures: struct dim and struct dim_sample. Struct dim
describes the state of DIM for a specific object (RX queue, TX queue,
other queues, etc.). This includes the current selected profile, previous data
samples, the callback function provided by the driver and more.
-Struct net_dim_sample describes a data sample, which will be compared to the
-data sample stored in struct net_dim in order to decide on the algorithm's next
+Struct dim_sample describes a data sample, which will be compared to the
+data sample stored in struct dim in order to decide on the algorithm's next
step. The sample should include bytes, packets and interrupts, measured by
the driver.
@@ -110,9 +110,9 @@ main net_dim() function. The recommended method is to call net_dim() on each
interrupt. Since Net DIM has a built-in moderation and it might decide to skip
iterations under certain conditions, there is no need to moderate the net_dim()
calls as well. As mentioned above, the driver needs to provide an object of type
-struct net_dim to the net_dim() function call. It is advised for each entity
-using Net DIM to hold a struct net_dim as part of its data structure and use it
-as the main Net DIM API object. The struct net_dim_sample should hold the latest
+struct dim to the net_dim() function call. It is advised for each entity
+using Net DIM to hold a struct dim as part of its data structure and use it
+as the main Net DIM API object. The struct dim_sample should hold the latest
bytes, packets and interrupts count. No need to perform any calculations, just
include the raw data.
@@ -132,19 +132,19 @@ usage is not complete but it should make the outline of the usage clear.
my_driver.c:
-#include <linux/net_dim.h>
+#include <linux/dim.h>
/* Callback for net DIM to schedule on a decision to change moderation */
void my_driver_do_dim_work(struct work_struct *work)
{
- /* Get struct net_dim from struct work_struct */
- struct net_dim *dim = container_of(work, struct net_dim,
- work);
+ /* Get struct dim from struct work_struct */
+ struct dim *dim = container_of(work, struct dim,
+ work);
/* Do interrupt moderation related stuff */
...
/* Signal net DIM work is done and it should move to next iteration */
- dim->state = NET_DIM_START_MEASURE;
+ dim->state = DIM_START_MEASURE;
}
/* My driver's interrupt handler */
@@ -152,13 +152,13 @@ int my_driver_handle_interrupt(struct my_driver_entity *my_entity, ...)
{
...
/* A struct to hold current measured data */
- struct net_dim_sample dim_sample;
+ struct dim_sample dim_sample;
...
/* Initiate data sample struct with current data */
- net_dim_sample(my_entity->events,
- my_entity->packets,
- my_entity->bytes,
- &dim_sample);
+ dim_update_sample(my_entity->events,
+ my_entity->packets,
+ my_entity->bytes,
+ &dim_sample);
/* Call net DIM */
net_dim(&my_entity->dim, dim_sample);
...
diff --git a/Documentation/process/coding-style.rst b/Documentation/process/coding-style.rst
index f4a2198187f9..ada573b7d703 100644
--- a/Documentation/process/coding-style.rst
+++ b/Documentation/process/coding-style.rst
@@ -56,7 +56,7 @@ instead of ``double-indenting`` the ``case`` labels. E.g.:
case 'K':
case 'k':
mem <<= 10;
- /* fall through */
+ fallthrough;
default:
break;
}
diff --git a/Documentation/process/deprecated.rst b/Documentation/process/deprecated.rst
index 053b24a6dd38..179f2a5625a0 100644
--- a/Documentation/process/deprecated.rst
+++ b/Documentation/process/deprecated.rst
@@ -122,14 +122,27 @@ memory adjacent to the stack (when built without `CONFIG_VMAP_STACK=y`)
Implicit switch case fall-through
---------------------------------
-The C language allows switch cases to "fall through" when
-a "break" statement is missing at the end of a case. This,
-however, introduces ambiguity in the code, as it's not always
-clear if the missing break is intentional or a bug. As there
-have been a long list of flaws `due to missing "break" statements
+The C language allows switch cases to "fall-through" when a "break" statement
+is missing at the end of a case. This, however, introduces ambiguity in the
+code, as it's not always clear if the missing break is intentional or a bug.
+
+As there have been a long list of flaws `due to missing "break" statements
<https://cwe.mitre.org/data/definitions/484.html>`_, we no longer allow
-"implicit fall-through". In order to identify an intentional fall-through
-case, we have adopted the marking used by static analyzers: a comment
-saying `/* Fall through */`. Once the C++17 `__attribute__((fallthrough))`
-is more widely handled by C compilers, static analyzers, and IDEs, we can
-switch to using that instead.
+"implicit fall-through".
+
+In order to identify intentional fall-through cases, we have adopted a
+pseudo-keyword macro 'fallthrough' which expands to gcc's extension
+__attribute__((__fallthrough__)). `Statement Attributes
+<https://gcc.gnu.org/onlinedocs/gcc/Statement-Attributes.html>`_
+
+When the C17/C18 [[fallthrough]] syntax is more commonly supported by
+C compilers, static analyzers, and IDEs, we can switch to using that syntax
+for the macro pseudo-keyword.
+
+All switch/case blocks must end in one of:
+
+ break;
+ fallthrough;
+ continue;
+ goto <label>;
+ return [expression];
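A minimal sketch tying this rule to the coding-style hunk earlier in this
patch (`mem` and `suffix` are assumed locals; the deliberate drops between
cases are marked with the pseudo-keyword):

.. code-block:: c

    switch (suffix) {
    case 'G':
    case 'g':
            mem <<= 10;
            fallthrough;
    case 'M':
    case 'm':
            mem <<= 10;
            fallthrough;
    case 'K':
    case 'k':
            mem <<= 10;
            break;
    default:
            break;
    }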
diff --git a/Documentation/usb/rio.rst b/Documentation/usb/rio.rst
deleted file mode 100644
index ea73475471db..000000000000
--- a/Documentation/usb/rio.rst
+++ /dev/null
@@ -1,109 +0,0 @@
-============
-Diamonds Rio
-============
-
-Copyright (C) 1999, 2000 Bruce Tenison
-
-Portions Copyright (C) 1999, 2000 David Nelson
-
-Thanks to David Nelson for guidance and the usage of the scanner.txt
-and scanner.c files to model our driver and this informative file.
-
-Mar. 2, 2000
-
-Changes
-=======
-
-- Initial Revision
-
-
-Overview
-========
-
-This README will address issues regarding how to configure the kernel
-to access a RIO 500 mp3 player.
-Before I explain how to use this to access the Rio500 please be warned:
-
-.. warning::
-
- Please note that this software is still under development. The authors
- are in no way responsible for any damage that may occur, no matter how
- inconsequential.
-
-It seems that the Rio has a problem when sending .mp3 with low batteries.
-I suggest when the batteries are low and you want to transfer stuff that you
-replace it with a fresh one. In my case, what happened is I lost two 16kb
-blocks (they are no longer usable to store information to it). But I don't
-know if that's normal or not; it could simply be a problem with the flash
-memory.
-
-In an extreme case, I left my Rio playing overnight and the batteries wore
-down to nothing and appear to have corrupted the flash memory. My RIO
-needed to be replaced as a result. Diamond tech support is aware of the
-problem. Do NOT allow your batteries to wear down to nothing before
-changing them. It appears RIO 500 firmware does not handle low battery
-power well at all.
-
-On systems with OHCI controllers, the kernel OHCI code appears to have
-power on problems with some chipsets. If you are having problems
-connecting to your RIO 500, try turning it on first and then plugging it
-into the USB cable.
-
-Contact Information
--------------------
-
- The main page for the project is hosted at sourceforge.net in the following
- URL: <http://rio500.sourceforge.net>. You can also go to the project's
- sourceforge home page at: <http://sourceforge.net/projects/rio500/>.
- There is also a mailing list: rio500-users@lists.sourceforge.net
-
-Authors
--------
-
-Most of the code was written by Cesar Miquel <miquel@df.uba.ar>. Keith
-Clayton <kclayton@jps.net> is incharge of the PPC port and making sure
-things work there. Bruce Tenison <btenison@dibbs.net> is adding support
-for .fon files and also does testing. The program will mostly sure be
-re-written and Pete Ikusz along with the rest will re-design it. I would
-also like to thank Tri Nguyen <tmn_3022000@hotmail.com> who provided use
-with some important information regarding the communication with the Rio.
-
-Additional Information and userspace tools
-
- http://rio500.sourceforge.net/
-
-
-Requirements
-============
-
-A host with a USB port running a Linux kernel with RIO 500 support enabled.
-
-The driver is a module called rio500, which should be automatically loaded
-as you plug in your device. If that fails you can manually load it with
-
- modprobe rio500
-
-Udev should automatically create a device node as soon as plug in your device.
-If that fails, you can manually add a device for the USB rio500::
-
- mknod /dev/usb/rio500 c 180 64
-
-In that case, set appropriate permissions for /dev/usb/rio500 (don't forget
-about group and world permissions). Both read and write permissions are
-required for proper operation.
-
-That's it. The Rio500 Utils at: http://rio500.sourceforge.net should
-be able to access the rio500.
-
-Limits
-======
-
-You can use only a single rio500 device at a time with your computer.
-
-Bugs
-====
-
-If you encounter any problems feel free to drop me an email.
-
-Bruce Tenison
-btenison@dibbs.net
diff --git a/MAINTAINERS b/MAINTAINERS
index 07ad2f9d2d20..b63c291ad029 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1251,6 +1251,7 @@ F: Documentation/devicetree/bindings/display/arm,hdlcd.txt
ARM KOMEDA DRM-KMS DRIVER
M: James (Qian) Wang <james.qian.wang@arm.com>
M: Liviu Dudau <liviu.dudau@arm.com>
+M: Mihail Atanassov <mihail.atanassov@arm.com>
L: Mali DP Maintainers <malidp@foss.arm.com>
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -2167,12 +2168,10 @@ F: arch/arm64/boot/dts/realtek/
F: Documentation/devicetree/bindings/arm/realtek.yaml
ARM/RENESAS ARM64 ARCHITECTURE
-M: Simon Horman <horms@verge.net.au>
M: Geert Uytterhoeven <geert+renesas@glider.be>
M: Magnus Damm <magnus.damm@gmail.com>
L: linux-renesas-soc@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel.git next
S: Supported
F: arch/arm64/boot/dts/renesas/
@@ -2284,12 +2283,10 @@ S: Maintained
F: drivers/media/platform/s5p-mfc/
ARM/SHMOBILE ARM ARCHITECTURE
-M: Simon Horman <horms@verge.net.au>
M: Geert Uytterhoeven <geert+renesas@glider.be>
M: Magnus Damm <magnus.damm@gmail.com>
L: linux-renesas-soc@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel.git next
S: Supported
F: arch/arm/boot/dts/emev2*
@@ -9138,7 +9135,7 @@ F: drivers/auxdisplay/ks0108.c
F: include/linux/ks0108.h
L3MDEV
-M: David Ahern <dsa@cumulusnetworks.com>
+M: David Ahern <dsahern@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
F: net/l3mdev
@@ -9199,6 +9196,7 @@ M: Pavel Machek <pavel@ucw.cz>
R: Dan Murphy <dmurphy@ti.com>
L: linux-leds@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pavel/linux-leds.git
S: Maintained
F: Documentation/devicetree/bindings/leds/
F: drivers/leds/
@@ -10270,7 +10268,7 @@ MEDIATEK ETHERNET DRIVER
M: Felix Fietkau <nbd@openwrt.org>
M: John Crispin <john@phrozen.org>
M: Sean Wang <sean.wang@mediatek.com>
-M: Nelson Chang <nelson.chang@mediatek.com>
+M: Mark Lee <Mark-MC.Lee@mediatek.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/mediatek/
@@ -11559,6 +11557,7 @@ NSDEPS
M: Matthias Maennich <maennich@google.com>
S: Maintained
F: scripts/nsdeps
+F: Documentation/core-api/symbol-namespaces.rst
NTB AMD DRIVER
M: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
@@ -12326,12 +12325,15 @@ F: arch/parisc/
F: Documentation/parisc/
F: drivers/parisc/
F: drivers/char/agp/parisc-agp.c
+F: drivers/input/misc/hp_sdc_rtc.c
F: drivers/input/serio/gscps2.c
+F: drivers/input/serio/hp_sdc*
F: drivers/parport/parport_gsc.*
F: drivers/tty/serial/8250/8250_gsc.c
F: drivers/video/fbdev/sti*
F: drivers/video/console/sti*
F: drivers/video/logo/logo_parisc*
+F: include/linux/hp_sdc.h
PARMAN
M: Jiri Pirko <jiri@mellanox.com>
@@ -13375,7 +13377,7 @@ S: Maintained
F: drivers/scsi/qla1280.[ch]
QLOGIC QLA2XXX FC-SCSI DRIVER
-M: qla2xxx-upstream@qlogic.com
+M: hmadhani@marvell.com
L: linux-scsi@vger.kernel.org
S: Supported
F: Documentation/scsi/LICENSE.qla2xxx
@@ -16778,13 +16780,6 @@ W: http://www.linux-usb.org/usbnet
S: Maintained
F: drivers/net/usb/dm9601.c
-USB DIAMOND RIO500 DRIVER
-M: Cesar Miquel <miquel@df.uba.ar>
-L: rio500-users@lists.sourceforge.net
-W: http://rio500.sourceforge.net
-S: Maintained
-F: drivers/usb/misc/rio500*
-
USB EHCI DRIVER
M: Alan Stern <stern@rowland.harvard.edu>
L: linux-usb@vger.kernel.org
@@ -17451,7 +17446,7 @@ F: include/linux/regulator/
K: regulator_get_optional
VRF
-M: David Ahern <dsa@cumulusnetworks.com>
+M: David Ahern <dsahern@kernel.org>
M: Shrijeet Mukherjee <shrijeet@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
diff --git a/Makefile b/Makefile
index f47dfdec7086..5475cdb6d57d 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
NAME = Nesting Opossum
# *DOCUMENTATION*
@@ -599,7 +599,7 @@ endif
# in addition to whatever we do anyway.
# Just "make" or "make all" shall build modules as well
-ifneq ($(filter all _all modules,$(MAKECMDGOALS)),)
+ifneq ($(filter all _all modules nsdeps,$(MAKECMDGOALS)),)
KBUILD_MODULES := 1
endif
@@ -1037,7 +1037,7 @@ export KBUILD_VMLINUX_OBJS := $(head-y) $(init-y) $(core-y) $(libs-y2) \
export KBUILD_VMLINUX_LIBS := $(libs-y1)
export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds
export LDFLAGS_vmlinux
-# used by scripts/package/Makefile
+# used by scripts/Makefile.package
export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) LICENSES arch include scripts tools)
vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_OBJS) $(KBUILD_VMLINUX_LIBS)
@@ -1217,9 +1217,8 @@ PHONY += kselftest
kselftest:
$(Q)$(MAKE) -C $(srctree)/tools/testing/selftests run_tests
-PHONY += kselftest-clean
-kselftest-clean:
- $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests clean
+kselftest-%: FORCE
+ $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests $*
PHONY += kselftest-merge
kselftest-merge:
diff --git a/arch/arm/boot/dts/mt7629-rfb.dts b/arch/arm/boot/dts/mt7629-rfb.dts
index 3621b7d2b22a..9980c10c6e29 100644
--- a/arch/arm/boot/dts/mt7629-rfb.dts
+++ b/arch/arm/boot/dts/mt7629-rfb.dts
@@ -66,9 +66,21 @@
pinctrl-1 = <&ephy_leds_pins>;
status = "okay";
+ gmac0: mac@0 {
+ compatible = "mediatek,eth-mac";
+ reg = <0>;
+ phy-mode = "2500base-x";
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+
gmac1: mac@1 {
compatible = "mediatek,eth-mac";
reg = <1>;
+ phy-mode = "gmii";
phy-handle = <&phy0>;
};
@@ -78,7 +90,6 @@
phy0: ethernet-phy@0 {
reg = <0>;
- phy-mode = "gmii";
};
};
};
diff --git a/arch/arm/boot/dts/mt7629.dtsi b/arch/arm/boot/dts/mt7629.dtsi
index 9608bc2ccb3f..867b88103b9d 100644
--- a/arch/arm/boot/dts/mt7629.dtsi
+++ b/arch/arm/boot/dts/mt7629.dtsi
@@ -468,14 +468,12 @@
compatible = "mediatek,mt7629-sgmiisys", "syscon";
reg = <0x1b128000 0x3000>;
#clock-cells = <1>;
- mediatek,physpeed = "2500";
};
sgmiisys1: syscon@1b130000 {
compatible = "mediatek,mt7629-sgmiisys", "syscon";
reg = <0x1b130000 0x3000>;
#clock-cells = <1>;
- mediatek,physpeed = "2500";
};
};
};
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index ce823c44e98a..4c268b70b735 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -520,6 +520,7 @@
interrupts = <39>;
clocks = <&ccu CLK_AHB_EHCI0>;
phys = <&usbphy 1>;
+ phy-names = "usb";
status = "disabled";
};
@@ -529,6 +530,7 @@
interrupts = <64>;
clocks = <&ccu CLK_USB_OHCI0>, <&ccu CLK_AHB_OHCI0>;
phys = <&usbphy 1>;
+ phy-names = "usb";
status = "disabled";
};
@@ -608,6 +610,7 @@
interrupts = <40>;
clocks = <&ccu CLK_AHB_EHCI1>;
phys = <&usbphy 2>;
+ phy-names = "usb";
status = "disabled";
};
@@ -617,6 +620,7 @@
interrupts = <65>;
clocks = <&ccu CLK_USB_OHCI1>, <&ccu CLK_AHB_OHCI1>;
phys = <&usbphy 2>;
+ phy-names = "usb";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi
index cfb1efc8828c..6befa236ba99 100644
--- a/arch/arm/boot/dts/sun5i.dtsi
+++ b/arch/arm/boot/dts/sun5i.dtsi
@@ -391,6 +391,7 @@
interrupts = <39>;
clocks = <&ccu CLK_AHB_EHCI>;
phys = <&usbphy 1>;
+ phy-names = "usb";
status = "disabled";
};
@@ -400,6 +401,7 @@
interrupts = <40>;
clocks = <&ccu CLK_USB_OHCI>, <&ccu CLK_AHB_OHCI>;
phys = <&usbphy 1>;
+ phy-names = "usb";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index bbeb743633c6..ac7638078420 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -545,6 +545,7 @@
clocks = <&ccu CLK_AHB1_EHCI0>;
resets = <&ccu RST_AHB1_EHCI0>;
phys = <&usbphy 1>;
+ phy-names = "usb";
status = "disabled";
};
@@ -555,6 +556,7 @@
clocks = <&ccu CLK_AHB1_OHCI0>, <&ccu CLK_USB_OHCI0>;
resets = <&ccu RST_AHB1_OHCI0>;
phys = <&usbphy 1>;
+ phy-names = "usb";
status = "disabled";
};
@@ -565,6 +567,7 @@
clocks = <&ccu CLK_AHB1_EHCI1>;
resets = <&ccu RST_AHB1_EHCI1>;
phys = <&usbphy 2>;
+ phy-names = "usb";
status = "disabled";
};
@@ -575,6 +578,7 @@
clocks = <&ccu CLK_AHB1_OHCI1>, <&ccu CLK_USB_OHCI1>;
resets = <&ccu RST_AHB1_OHCI1>;
phys = <&usbphy 2>;
+ phy-names = "usb";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 49380de754a9..874231be04e4 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -623,6 +623,7 @@
interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ccu CLK_AHB_EHCI0>;
phys = <&usbphy 1>;
+ phy-names = "usb";
status = "disabled";
};
@@ -632,6 +633,7 @@
interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ccu CLK_USB_OHCI0>, <&ccu CLK_AHB_OHCI0>;
phys = <&usbphy 1>;
+ phy-names = "usb";
status = "disabled";
};
@@ -714,6 +716,7 @@
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ccu CLK_AHB_EHCI1>;
phys = <&usbphy 2>;
+ phy-names = "usb";
status = "disabled";
};
@@ -723,6 +726,7 @@
interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ccu CLK_USB_OHCI1>, <&ccu CLK_AHB_OHCI1>;
phys = <&usbphy 2>;
+ phy-names = "usb";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index 52eed0ae3607..f292f96ab39b 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -307,6 +307,7 @@
clocks = <&ccu CLK_BUS_EHCI>;
resets = <&ccu RST_BUS_EHCI>;
phys = <&usbphy 1>;
+ phy-names = "usb";
status = "disabled";
};
@@ -317,6 +318,7 @@
clocks = <&ccu CLK_BUS_OHCI>, <&ccu CLK_USB_OHCI>;
resets = <&ccu RST_BUS_OHCI>;
phys = <&usbphy 1>;
+ phy-names = "usb";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi
index 523be6611c50..74bb053cf23c 100644
--- a/arch/arm/boot/dts/sun8i-a83t.dtsi
+++ b/arch/arm/boot/dts/sun8i-a83t.dtsi
@@ -632,6 +632,7 @@
clocks = <&ccu CLK_BUS_EHCI0>;
resets = <&ccu RST_BUS_EHCI0>;
phys = <&usbphy 1>;
+ phy-names = "usb";
status = "disabled";
};
@@ -643,6 +644,7 @@
clocks = <&ccu CLK_BUS_OHCI0>, <&ccu CLK_USB_OHCI0>;
resets = <&ccu RST_BUS_OHCI0>;
phys = <&usbphy 1>;
+ phy-names = "usb";
status = "disabled";
};
@@ -654,6 +656,7 @@
clocks = <&ccu CLK_BUS_EHCI1>;
resets = <&ccu RST_BUS_EHCI1>;
phys = <&usbphy 2>;
+ phy-names = "usb";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun8i-r40.dtsi b/arch/arm/boot/dts/sun8i-r40.dtsi
index bde068111b85..c9c2688db66d 100644
--- a/arch/arm/boot/dts/sun8i-r40.dtsi
+++ b/arch/arm/boot/dts/sun8i-r40.dtsi
@@ -273,6 +273,7 @@
clocks = <&ccu CLK_BUS_EHCI1>;
resets = <&ccu RST_BUS_EHCI1>;
phys = <&usbphy 1>;
+ phy-names = "usb";
status = "disabled";
};
@@ -284,6 +285,7 @@
<&ccu CLK_USB_OHCI1>;
resets = <&ccu RST_BUS_OHCI1>;
phys = <&usbphy 1>;
+ phy-names = "usb";
status = "disabled";
};
@@ -294,6 +296,7 @@
clocks = <&ccu CLK_BUS_EHCI2>;
resets = <&ccu RST_BUS_EHCI2>;
phys = <&usbphy 2>;
+ phy-names = "usb";
status = "disabled";
};
@@ -305,6 +308,7 @@
<&ccu CLK_USB_OHCI2>;
resets = <&ccu RST_BUS_OHCI2>;
phys = <&usbphy 2>;
+ phy-names = "usb";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi
index c34d505c7efe..b9b6fb00be28 100644
--- a/arch/arm/boot/dts/sun9i-a80.dtsi
+++ b/arch/arm/boot/dts/sun9i-a80.dtsi
@@ -346,6 +346,7 @@
clocks = <&usb_clocks CLK_BUS_HCI0>;
resets = <&usb_clocks RST_USB0_HCI>;
phys = <&usbphy1>;
+ phy-names = "usb";
status = "disabled";
};
@@ -357,6 +358,7 @@
<&usb_clocks CLK_USB_OHCI0>;
resets = <&usb_clocks RST_USB0_HCI>;
phys = <&usbphy1>;
+ phy-names = "usb";
status = "disabled";
};
@@ -378,6 +380,7 @@
clocks = <&usb_clocks CLK_BUS_HCI1>;
resets = <&usb_clocks RST_USB1_HCI>;
phys = <&usbphy2>;
+ phy-names = "usb";
status = "disabled";
};
@@ -407,6 +410,7 @@
clocks = <&usb_clocks CLK_BUS_HCI2>;
resets = <&usb_clocks RST_USB2_HCI>;
phys = <&usbphy3>;
+ phy-names = "usb";
status = "disabled";
};
@@ -418,6 +422,7 @@
<&usb_clocks CLK_USB_OHCI2>;
resets = <&usb_clocks RST_USB2_HCI>;
phys = <&usbphy3>;
+ phy-names = "usb";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
index eba190b3f9de..107eeafad20a 100644
--- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi
+++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
@@ -304,6 +304,7 @@
clocks = <&ccu CLK_BUS_EHCI1>, <&ccu CLK_BUS_OHCI1>;
resets = <&ccu RST_BUS_EHCI1>, <&ccu RST_BUS_OHCI1>;
phys = <&usbphy 1>;
+ phy-names = "usb";
status = "disabled";
};
@@ -315,6 +316,7 @@
<&ccu CLK_USB_OHCI1>;
resets = <&ccu RST_BUS_EHCI1>, <&ccu RST_BUS_OHCI1>;
phys = <&usbphy 1>;
+ phy-names = "usb";
status = "disabled";
};
@@ -325,6 +327,7 @@
clocks = <&ccu CLK_BUS_EHCI2>, <&ccu CLK_BUS_OHCI2>;
resets = <&ccu RST_BUS_EHCI2>, <&ccu RST_BUS_OHCI2>;
phys = <&usbphy 2>;
+ phy-names = "usb";
status = "disabled";
};
@@ -336,6 +339,7 @@
<&ccu CLK_USB_OHCI2>;
resets = <&ccu RST_BUS_EHCI2>, <&ccu RST_BUS_OHCI2>;
phys = <&usbphy 2>;
+ phy-names = "usb";
status = "disabled";
};
@@ -346,6 +350,7 @@
clocks = <&ccu CLK_BUS_EHCI3>, <&ccu CLK_BUS_OHCI3>;
resets = <&ccu RST_BUS_EHCI3>, <&ccu RST_BUS_OHCI3>;
phys = <&usbphy 3>;
+ phy-names = "usb";
status = "disabled";
};
@@ -357,6 +362,7 @@
<&ccu CLK_USB_OHCI3>;
resets = <&ccu RST_BUS_EHCI3>, <&ccu RST_BUS_OHCI3>;
phys = <&usbphy 3>;
+ phy-names = "usb";
status = "disabled";
};
diff --git a/arch/arm/configs/badge4_defconfig b/arch/arm/configs/badge4_defconfig
index 5ae5b5228467..ef484c4cfd1a 100644
--- a/arch/arm/configs/badge4_defconfig
+++ b/arch/arm/configs/badge4_defconfig
@@ -91,7 +91,6 @@ CONFIG_USB_SERIAL_PL2303=m
CONFIG_USB_SERIAL_CYBERJACK=m
CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
-CONFIG_USB_RIO500=m
CONFIG_EXT2_FS=m
CONFIG_EXT3_FS=m
CONFIG_MSDOS_FS=y
diff --git a/arch/arm/configs/corgi_defconfig b/arch/arm/configs/corgi_defconfig
index e4f6442588e7..4fec2ec379ad 100644
--- a/arch/arm/configs/corgi_defconfig
+++ b/arch/arm/configs/corgi_defconfig
@@ -195,7 +195,6 @@ CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_EMI62=m
CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
CONFIG_USB_CYTHERM=m
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 787c3f9be414..b817c57f05f1 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -581,7 +581,6 @@ CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_EMI62=m
CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
CONFIG_USB_CYTHERM=m
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 95b5a4ffddea..73ed73a8785a 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -327,7 +327,6 @@ CONFIG_USB_EMI62=m
CONFIG_USB_EMI26=m
CONFIG_USB_ADUTUX=m
CONFIG_USB_SEVSEG=m
-CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
CONFIG_USB_CYPRESS_CY7C63=m
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig
index 4fb51d665abb..a1cdbfa064c5 100644
--- a/arch/arm/configs/spitz_defconfig
+++ b/arch/arm/configs/spitz_defconfig
@@ -189,7 +189,6 @@ CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_EMI62=m
CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
CONFIG_USB_CYTHERM=m
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index b24df84a1d7a..043b0b18bf7e 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -98,6 +98,7 @@ config CRYPTO_AES_ARM_CE
tristate "Accelerated AES using ARMv8 Crypto Extensions"
depends on KERNEL_MODE_NEON
select CRYPTO_BLKCIPHER
+ select CRYPTO_LIB_AES
select CRYPTO_SIMD
help
Use an implementation of AES in CBC, CTR and XTS modes that uses
diff --git a/arch/arm/crypto/aes-ce-core.S b/arch/arm/crypto/aes-ce-core.S
index b978cdf133af..4d1707388d94 100644
--- a/arch/arm/crypto/aes-ce-core.S
+++ b/arch/arm/crypto/aes-ce-core.S
@@ -9,6 +9,7 @@
#include <asm/assembler.h>
.text
+ .arch armv8-a
.fpu crypto-neon-fp-armv8
.align 3
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 41a9b4257b72..3f047afb982c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -110,7 +110,6 @@ config ARM64
select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL
select GENERIC_GETTIMEOFDAY
- select GENERIC_COMPAT_VDSO if (!CPU_BIG_ENDIAN && COMPAT)
select HANDLE_DOMAIN_IRQ
select HARDIRQS_SW_RESEND
select HAVE_PCI
@@ -617,6 +616,23 @@ config CAVIUM_ERRATUM_30115
If unsure, say Y.
+config CAVIUM_TX2_ERRATUM_219
+ bool "Cavium ThunderX2 erratum 219: PRFM between TTBR change and ISB fails"
+ default y
+ help
+ On Cavium ThunderX2, a load, store or prefetch instruction between a
+ TTBR update and the corresponding context synchronizing operation can
+ cause a spurious Data Abort to be delivered to any hardware thread in
+ the CPU core.
+
+ Work around the issue by avoiding the problematic code sequence and
+ trapping KVM guest TTBRx_EL1 writes to EL2 when SMT is enabled. The
+ trap handler performs the corresponding register access, skips the
+ instruction and ensures context synchronization by virtue of the
+ exception return.
+
+ If unsure, say Y.
+
config QCOM_FALKOR_ERRATUM_1003
bool "Falkor E1003: Incorrect translation due to ASID change"
default y
@@ -1159,7 +1175,7 @@ menuconfig COMPAT
if COMPAT
config KUSER_HELPERS
- bool "Enable kuser helpers page for 32 bit applications"
+ bool "Enable kuser helpers page for 32-bit applications"
default y
help
Warning: disabling this option may break 32-bit user programs.
@@ -1185,6 +1201,18 @@ config KUSER_HELPERS
Say N here only if you are absolutely certain that you do not
need these helpers; otherwise, the safe option is to say Y.
+config COMPAT_VDSO
+ bool "Enable vDSO for 32-bit applications"
+ depends on !CPU_BIG_ENDIAN && "$(CROSS_COMPILE_COMPAT)" != ""
+ select GENERIC_COMPAT_VDSO
+ default y
+ help
+ Place in the process address space of 32-bit applications an
+ ELF shared object providing fast implementations of gettimeofday
+ and clock_gettime.
+
+ You must have a 32-bit build of glibc 2.22 or later for programs
+ to seamlessly take advantage of this.
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 84a3d502c5a5..2c0238ce0551 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -53,22 +53,6 @@ $(warning Detected assembler with broken .inst; disassembly will be unreliable)
endif
endif
-ifeq ($(CONFIG_GENERIC_COMPAT_VDSO), y)
- CROSS_COMPILE_COMPAT ?= $(CONFIG_CROSS_COMPILE_COMPAT_VDSO:"%"=%)
-
- ifeq ($(CONFIG_CC_IS_CLANG), y)
- $(warning CROSS_COMPILE_COMPAT is clang, the compat vDSO will not be built)
- else ifeq ($(strip $(CROSS_COMPILE_COMPAT)),)
- $(warning CROSS_COMPILE_COMPAT not defined or empty, the compat vDSO will not be built)
- else ifeq ($(shell which $(CROSS_COMPILE_COMPAT)gcc 2> /dev/null),)
- $(error $(CROSS_COMPILE_COMPAT)gcc not found, check CROSS_COMPILE_COMPAT)
- else
- export CROSS_COMPILE_COMPAT
- export CONFIG_COMPAT_VDSO := y
- compat_vdso := -DCONFIG_COMPAT_VDSO=1
- endif
-endif
-
KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst) \
$(compat_vdso) $(cc_has_k_constraint)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
index 2b6345db7dc0..78c82a665c84 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
@@ -104,6 +104,7 @@
&ehci0 {
phys = <&usbphy 0>;
+ phy-names = "usb";
status = "okay";
};
@@ -150,6 +151,7 @@
&ohci0 {
phys = <&usbphy 0>;
+ phy-names = "usb";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index 69128a6dfc46..3eccbdba7154 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -553,6 +553,7 @@
resets = <&ccu RST_BUS_OHCI1>,
<&ccu RST_BUS_EHCI1>;
phys = <&usbphy 1>;
+ phy-names = "usb";
status = "disabled";
};
@@ -564,6 +565,7 @@
<&ccu CLK_USB_OHCI1>;
resets = <&ccu RST_BUS_OHCI1>;
phys = <&usbphy 1>;
+ phy-names = "usb";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
index 4020a1aafa3e..0d5ea19336a1 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
@@ -547,6 +547,7 @@
resets = <&ccu RST_BUS_OHCI3>,
<&ccu RST_BUS_EHCI3>;
phys = <&usb2phy 3>;
+ phy-names = "usb";
status = "disabled";
};
@@ -558,6 +559,7 @@
<&ccu CLK_USB_OHCI3>;
resets = <&ccu RST_BUS_OHCI3>;
phys = <&usb2phy 3>;
+ phy-names = "usb";
status = "disabled";
};
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index f74909ba29bd..5bf963830b17 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -78,10 +78,9 @@ alternative_else_nop_endif
/*
* Remove the address tag from a virtual address, if present.
*/
- .macro clear_address_tag, dst, addr
- tst \addr, #(1 << 55)
- bic \dst, \addr, #(0xff << 56)
- csel \dst, \dst, \addr, eq
+ .macro untagged_addr, dst, addr
+ sbfx \dst, \addr, #0, #56
+ and \dst, \dst, \addr
.endm
#endif
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index c6bd87d2915b..574808b9df4c 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -321,7 +321,8 @@ static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
}
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) \
-static inline u##sz __lse__cmpxchg_case_##name##sz(volatile void *ptr, \
+static __always_inline u##sz \
+__lse__cmpxchg_case_##name##sz(volatile void *ptr, \
u##sz old, \
u##sz new) \
{ \
@@ -362,7 +363,8 @@ __CMPXCHG_CASE(x, , mb_, 64, al, "memory")
#undef __CMPXCHG_CASE
#define __CMPXCHG_DBL(name, mb, cl...) \
-static inline long __lse__cmpxchg_double##name(unsigned long old1, \
+static __always_inline long \
+__lse__cmpxchg_double##name(unsigned long old1, \
unsigned long old2, \
unsigned long new1, \
unsigned long new2, \
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index f19fe4b9acc4..ac1dbca3d0cd 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -52,7 +52,9 @@
#define ARM64_HAS_IRQ_PRIO_MASKING 42
#define ARM64_HAS_DCPODP 43
#define ARM64_WORKAROUND_1463225 44
+#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45
+#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46
-#define ARM64_NCAPS 45
+#define ARM64_NCAPS 47
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index b61b50bf68b1..c23c47360664 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -215,12 +215,18 @@ static inline unsigned long kaslr_offset(void)
* up with a tagged userland pointer. Clear the tag to get a sane pointer to
* pass on to access_ok(), for instance.
*/
-#define untagged_addr(addr) \
+#define __untagged_addr(addr) \
((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
+#define untagged_addr(addr) ({ \
+ u64 __addr = (__force u64)addr; \
+ __addr &= __untagged_addr(__addr); \
+ (__force __typeof__(addr))__addr; \
+})
+
#ifdef CONFIG_KASAN_SW_TAGS
#define __tag_shifted(tag) ((u64)(tag) << 56)
-#define __tag_reset(addr) untagged_addr(addr)
+#define __tag_reset(addr) __untagged_addr(addr)
#define __tag_get(addr) (__u8)((u64)(addr) >> 56)
#else
#define __tag_shifted(tag) 0UL
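
The rewritten untagged_addr() pairs with the sbfx-based assembly macro above: sign-extending from bit 55 yields an all-ones mask for TTBR1 (kernel) addresses and a tag-clearing mask for TTBR0 (user) addresses, so a single AND strips the tag from user pointers while leaving kernel pointers untouched. A minimal userspace sketch of the same arithmetic, using hypothetical pointer values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's sign_extend64(addr, 55), which the assembly
 * macro performs in one instruction via sbfx. */
static uint64_t sign_extend55(uint64_t addr)
{
	return (uint64_t)((int64_t)(addr << 8) >> 8);
}

/* The new untagged_addr(): the sign-extended value is all-ones for
 * kernel (bit 55 set) addresses and clears bits 63:56 for user ones. */
static uint64_t untagged(uint64_t addr)
{
	return addr & sign_extend55(addr);
}

int main(void)
{
	uint64_t user = 0x2a00001234560000ULL;	/* tag 0x2a, bit 55 clear */
	uint64_t kern = 0xffff000012345678ULL;	/* bit 55 set */

	assert(untagged(user) == 0x0000001234560000ULL);	/* tag stripped */
	assert(untagged(kern) == kern);				/* unchanged */
	printf("ok\n");
	return 0;
}
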
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 7576df00eb50..8330810f699e 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -876,9 +876,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
-#define kc_vaddr_to_offset(v) ((v) & ~PAGE_END)
-#define kc_offset_to_vaddr(o) ((o) | PAGE_END)
-
#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 972d196c7714..6e919fafb43d 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -212,7 +212,7 @@
#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0)
#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
-#define SYS_PAR_EL1_F BIT(1)
+#define SYS_PAR_EL1_F BIT(0)
#define SYS_PAR_EL1_FST GENMASK(6, 1)
/*** Statistical Profiling Extension ***/
diff --git a/arch/arm64/include/asm/vdso/compat_barrier.h b/arch/arm64/include/asm/vdso/compat_barrier.h
index fb60a88b5ed4..3fd8fd6d8fc2 100644
--- a/arch/arm64/include/asm/vdso/compat_barrier.h
+++ b/arch/arm64/include/asm/vdso/compat_barrier.h
@@ -20,7 +20,7 @@
#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
-#if __LINUX_ARM_ARCH__ >= 8
+#if __LINUX_ARM_ARCH__ >= 8 && defined(CONFIG_AS_DMB_ISHLD)
#define aarch32_smp_mb() dmb(ish)
#define aarch32_smp_rmb() dmb(ishld)
#define aarch32_smp_wmb() dmb(ishst)
diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
deleted file mode 100644
index 1f38bf330a6e..000000000000
--- a/arch/arm64/include/asm/vdso_datapage.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 ARM Limited
- */
-#ifndef __ASM_VDSO_DATAPAGE_H
-#define __ASM_VDSO_DATAPAGE_H
-
-#ifndef __ASSEMBLY__
-
-struct vdso_data {
- __u64 cs_cycle_last; /* Timebase at clocksource init */
- __u64 raw_time_sec; /* Raw time */
- __u64 raw_time_nsec;
- __u64 xtime_clock_sec; /* Kernel time */
- __u64 xtime_clock_nsec;
- __u64 xtime_coarse_sec; /* Coarse time */
- __u64 xtime_coarse_nsec;
- __u64 wtm_clock_sec; /* Wall to monotonic time */
- __u64 wtm_clock_nsec;
- __u32 tb_seq_count; /* Timebase sequence counter */
- /* cs_* members must be adjacent and in this order (ldp accesses) */
- __u32 cs_mono_mult; /* NTP-adjusted clocksource multiplier */
- __u32 cs_shift; /* Clocksource shift (mono = raw) */
- __u32 cs_raw_mult; /* Raw clocksource multiplier */
- __u32 tz_minuteswest; /* Whacky timezone stuff */
- __u32 tz_dsttime;
- __u32 use_syscall;
- __u32 hrtimer_res;
-};
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __ASM_VDSO_DATAPAGE_H */
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 2ec09debc2bb..ca158be21f83 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -174,6 +174,9 @@ static void __init register_insn_emulation(struct insn_emulation_ops *ops)
struct insn_emulation *insn;
insn = kzalloc(sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return;
+
insn->ops = ops;
insn->min = INSN_UNDEF;
@@ -233,6 +236,8 @@ static void __init register_insn_emulation_sysctl(void)
insns_sysctl = kcalloc(nr_insn_emulated + 1, sizeof(*sysctl),
GFP_KERNEL);
+ if (!insns_sysctl)
+ return;
raw_spin_lock_irqsave(&insn_emulation_lock, flags);
list_for_each_entry(insn, &insn_emulation, node) {
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 1e43ba5c79b7..6c3b10a41bd8 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -12,6 +12,7 @@
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
+#include <asm/smp_plat.h>
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
@@ -128,8 +129,8 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
int cpu, slot = -1;
/*
- * enable_smccc_arch_workaround_1() passes NULL for the hyp_vecs
- * start/end if we're a guest. Skip the hyp-vectors work.
+ * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
+ * we're a guest. Skip the hyp-vectors work.
*/
if (!hyp_vecs_start) {
__this_cpu_write(bp_hardening_data.fn, fn);
@@ -623,6 +624,30 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
return (need_wa > 0);
}
+static const __maybe_unused struct midr_range tx2_family_cpus[] = {
+ MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+ MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+ {},
+};
+
+static bool __maybe_unused
+needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ int i;
+
+ if (!is_affected_midr_range_list(entry, scope) ||
+ !is_hyp_mode_available())
+ return false;
+
+ for_each_possible_cpu(i) {
+ if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
+ return true;
+ }
+
+ return false;
+}
+
#ifdef CONFIG_HARDEN_EL2_VECTORS
static const struct midr_range arm64_harden_el2_vectors[] = {
@@ -852,6 +877,19 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.matches = has_cortex_a76_erratum_1463225,
},
#endif
+#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
+ {
+ .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
+ .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
+ ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
+ .matches = needs_tx2_tvm_workaround,
+ },
+ {
+ .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
+ .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
+ ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
+ },
+#endif
{
}
};
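
The matches hook for the TVM half of the erratum only fires when SMT is live: needs_tx2_tvm_workaround() scans every possible CPU and reports true as soon as one has a nonzero MPIDR affinity level 0, since on these parts Aff0 numbers the hardware thread within a core. A standalone sketch of that test, with hypothetical MPIDR values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Aff0 lives in MPIDR_EL1 bits [7:0]; on ThunderX2 it is the thread
 * index within a core, so any nonzero value implies SMT is enabled. */
#define MPIDR_AFF0(mpidr)	((mpidr) & 0xffULL)

static bool smt_enabled(const uint64_t *mpidrs, int ncpus)
{
	for (int i = 0; i < ncpus; i++)
		if (MPIDR_AFF0(mpidrs[i]) != 0)
			return true;
	return false;
}

int main(void)
{
	/* Hypothetical values: two cores x two threads vs. four cores. */
	uint64_t smt_on[]  = { 0x000, 0x001, 0x100, 0x101 };
	uint64_t smt_off[] = { 0x000, 0x100, 0x200, 0x300 };

	printf("smt_on=%d smt_off=%d\n",
	       smt_enabled(smt_on, 4), smt_enabled(smt_off, 4));
	return 0;
}
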
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 9323bcc40a58..80f459ad0190 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -136,6 +136,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
@@ -175,11 +176,16 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
ARM64_FTR_END,
};
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 84a822748c84..cf3bd2976e57 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -604,7 +604,7 @@ el1_da:
*/
mrs x3, far_el1
inherit_daif pstate=x23, tmp=x2
- clear_address_tag x0, x3
+ untagged_addr x0, x3
mov x2, sp // struct pt_regs
bl do_mem_abort
@@ -680,7 +680,7 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
orr x24, x24, x0
alternative_else_nop_endif
cbnz x24, 1f // preempt count != 0 || NMI return path
- bl preempt_schedule_irq // irq en/disable is done inside
+ bl arm64_preempt_schedule_irq // irq en/disable is done inside
1:
#endif
@@ -775,6 +775,7 @@ el0_sync_compat:
b.ge el0_dbg
b el0_inv
el0_svc_compat:
+ gic_prio_kentry_setup tmp=x1
mov x0, sp
bl el0_svc_compat_handler
b ret_to_user
@@ -807,7 +808,7 @@ el0_da:
mrs x26, far_el1
ct_user_exit_irqoff
enable_daif
- clear_address_tag x0, x26
+ untagged_addr x0, x26
mov x1, x25
mov x2, sp
bl do_mem_abort
@@ -1070,7 +1071,9 @@ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
#else
ldr x30, =vectors
#endif
+alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
prfm plil1strm, [x30, #(1b - tramp_vectors)]
+alternative_else_nop_endif
msr vbar_el1, x30
add x30, x30, #(1b - tramp_vectors)
isb
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 171773257974..06e56b470315 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -121,10 +121,16 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
/*
* Ensure updated trampoline is visible to instruction
- * fetch before we patch in the branch.
+ * fetch before we patch in the branch. Although the
+ * architecture doesn't require an IPI in this case,
+ * Neoverse-N1 erratum #1542419 does require one
+ * if the TLB maintenance in module_enable_ro() is
+ * skipped due to rodata_enabled. It doesn't seem worth
+ * it to make it conditional given that this is
+ * certainly not a fast-path.
*/
- __flush_icache_range((unsigned long)&dst[0],
- (unsigned long)&dst[1]);
+ flush_icache_range((unsigned long)&dst[0],
+ (unsigned long)&dst[1]);
}
addr = (unsigned long)dst;
#else /* CONFIG_ARM64_MODULE_PLTS */
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index e0a7fce0e01c..a96b2921d22c 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -201,6 +201,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
gfp_t mask)
{
int rc = 0;
+ pgd_t *trans_pgd;
pgd_t *pgdp;
pud_t *pudp;
pmd_t *pmdp;
@@ -215,7 +216,13 @@ static int create_safe_exec_page(void *src_start, size_t length,
memcpy((void *)dst, src_start, length);
__flush_icache_range(dst, dst + length);
- pgdp = pgd_offset_raw(allocator(mask), dst_addr);
+ trans_pgd = allocator(mask);
+ if (!trans_pgd) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ pgdp = pgd_offset_raw(trans_pgd, dst_addr);
if (pgd_none(READ_ONCE(*pgdp))) {
pudp = allocator(mask);
if (!pudp) {
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index a47462def04b..71f788cd2b18 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -17,6 +17,7 @@
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
+#include <linux/lockdep.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
@@ -44,6 +45,7 @@
#include <asm/alternative.h>
#include <asm/arch_gicv3.h>
#include <asm/compat.h>
+#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
@@ -332,22 +334,27 @@ void arch_release_task_struct(struct task_struct *tsk)
fpsimd_release_task(tsk);
}
-/*
- * src and dst may temporarily have aliased sve_state after task_struct
- * is copied. We cannot fix this properly here, because src may have
- * live SVE state and dst's thread_info may not exist yet, so tweaking
- * either src's or dst's TIF_SVE is not safe.
- *
- * The unaliasing is done in copy_thread() instead. This works because
- * dst is not schedulable or traceable until both of these functions
- * have been called.
- */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
if (current->mm)
fpsimd_preserve_current_state();
*dst = *src;
+ /* We rely on the above assignment to initialize dst's thread_flags: */
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));
+
+ /*
+ * Detach src's sve_state (if any) from dst so that it does not
+ * get erroneously used or freed prematurely. dst's sve_state
+ * will be allocated on demand later on if dst uses SVE.
+ * For consistency, also clear TIF_SVE here: this could be done
+ * later in copy_process(), but to avoid tripping up future
+ * maintainers it is best not to leave TIF_SVE and sve_state in
+ * an inconsistent state, even temporarily.
+ */
+ dst->thread.sve_state = NULL;
+ clear_tsk_thread_flag(dst, TIF_SVE);
+
return 0;
}
@@ -361,13 +368,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
/*
- * Unalias p->thread.sve_state (if any) from the parent task
- * and disable discard SVE state for p:
- */
- clear_tsk_thread_flag(p, TIF_SVE);
- p->thread.sve_state = NULL;
-
- /*
* In case p was allocated the same task_struct pointer as some
* other recently-exited task, make sure p is disassociated from
* any cpu that may have run that now-exited task recently.
@@ -633,3 +633,19 @@ static int __init tagged_addr_init(void)
core_initcall(tagged_addr_init);
#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
+
+asmlinkage void __sched arm64_preempt_schedule_irq(void)
+{
+ lockdep_assert_irqs_disabled();
+
+ /*
+ * Preempting a task from an IRQ means we leave copies of PSTATE
+ * on the stack. cpufeature's enable calls may modify PSTATE, but
+ * resuming one of these preempted tasks would undo those changes.
+ *
+ * Only allow a task to be preempted once cpufeatures have been
+ * enabled.
+ */
+ if (static_branch_likely(&arm64_const_caps_ready))
+ preempt_schedule_irq();
+}
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 1fba0776ed40..76b327f88fbb 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -8,15 +8,21 @@
ARCH_REL_TYPE_ABS := R_ARM_JUMP_SLOT|R_ARM_GLOB_DAT|R_ARM_ABS32
include $(srctree)/lib/vdso/Makefile
-COMPATCC := $(CROSS_COMPILE_COMPAT)gcc
+# Same as cc-*option, but using CC_COMPAT instead of CC
+ifeq ($(CONFIG_CC_IS_CLANG), y)
+CC_COMPAT ?= $(CC)
+else
+CC_COMPAT ?= $(CROSS_COMPILE_COMPAT)gcc
+endif
-# Same as cc-*option, but using COMPATCC instead of CC
cc32-option = $(call try-run,\
- $(COMPATCC) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+ $(CC_COMPAT) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
cc32-disable-warning = $(call try-run,\
- $(COMPATCC) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+ $(CC_COMPAT) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
cc32-ldoption = $(call try-run,\
- $(COMPATCC) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
+ $(CC_COMPAT) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
+cc32-as-instr = $(call try-run,\
+ printf "%b\n" "$(1)" | $(CC_COMPAT) $(VDSO_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
# We cannot use the global flags to compile the vDSO files, the main reason
# being that the 32-bit compiler may be older than the main (64-bit) compiler
@@ -25,22 +31,21 @@ cc32-ldoption = $(call try-run,\
# arm64 one.
# As a result we set our own flags here.
-# From top-level Makefile
-# NOSTDINC_FLAGS
-VDSO_CPPFLAGS := -nostdinc -isystem $(shell $(COMPATCC) -print-file-name=include)
+# KBUILD_CPPFLAGS and NOSTDINC_FLAGS from top-level Makefile
+VDSO_CPPFLAGS := -D__KERNEL__ -nostdinc -isystem $(shell $(CC_COMPAT) -print-file-name=include)
VDSO_CPPFLAGS += $(LINUXINCLUDE)
-VDSO_CPPFLAGS += $(KBUILD_CPPFLAGS)
# Common C and assembly flags
# From top-level Makefile
VDSO_CAFLAGS := $(VDSO_CPPFLAGS)
+ifneq ($(shell $(CC_COMPAT) --version 2>&1 | head -n 1 | grep clang),)
+VDSO_CAFLAGS += --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%))
+endif
+
VDSO_CAFLAGS += $(call cc32-option,-fno-PIE)
ifdef CONFIG_DEBUG_INFO
VDSO_CAFLAGS += -g
endif
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(COMPATCC)), y)
-VDSO_CAFLAGS += -DCC_HAVE_ASM_GOTO
-endif
# From arm Makefile
VDSO_CAFLAGS += $(call cc32-option,-fno-dwarf2-cfi-asm)
@@ -55,6 +60,7 @@ endif
VDSO_CAFLAGS += -fPIC -fno-builtin -fno-stack-protector
VDSO_CAFLAGS += -DDISABLE_BRANCH_PROFILING
+
# Try to compile for ARMv8. If the compiler is too old and doesn't support it,
# fall back to v7. There is no easy way to check for what architecture the code
# is being compiled, so define a macro specifying that (see arch/arm/Makefile).
@@ -91,6 +97,12 @@ VDSO_CFLAGS += -Wno-int-to-pointer-cast
VDSO_AFLAGS := $(VDSO_CAFLAGS)
VDSO_AFLAGS += -D__ASSEMBLY__
+# Check for binutils support for dmb ishld
+dmbinstr := $(call cc32-as-instr,dmb ishld,-DCONFIG_AS_DMB_ISHLD=1)
+
+VDSO_CFLAGS += $(dmbinstr)
+VDSO_AFLAGS += $(dmbinstr)
+
VDSO_LDFLAGS := $(VDSO_CPPFLAGS)
# From arm vDSO Makefile
VDSO_LDFLAGS += -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1
@@ -159,14 +171,14 @@ quiet_cmd_vdsold_and_vdso_check = LD32 $@
cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check)
quiet_cmd_vdsold = LD32 $@
- cmd_vdsold = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \
+ cmd_vdsold = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \
-Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
quiet_cmd_vdsocc = CC32 $@
- cmd_vdsocc = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $<
+ cmd_vdsocc = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $<
quiet_cmd_vdsocc_gettimeofday = CC32 $@
- cmd_vdsocc_gettimeofday = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $<
+ cmd_vdsocc_gettimeofday = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $<
quiet_cmd_vdsoas = AS32 $@
- cmd_vdsoas = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $<
+ cmd_vdsoas = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $<
quiet_cmd_vdsomunge = MUNGE $@
cmd_vdsomunge = $(obj)/$(munge) $< $@
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 3d3815020e36..799e84a40335 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -124,6 +124,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
u64 hcr = vcpu->arch.hcr_el2;
+ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
+ hcr |= HCR_TVM;
+
write_sysreg(hcr, hcr_el2);
if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
@@ -174,8 +177,10 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
* the crucial bit is "On taking a vSError interrupt,
* HCR_EL2.VSE is cleared to 0."
*/
- if (vcpu->arch.hcr_el2 & HCR_VSE)
- vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);
+ if (vcpu->arch.hcr_el2 & HCR_VSE) {
+ vcpu->arch.hcr_el2 &= ~HCR_VSE;
+ vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
+ }
if (has_vhe())
deactivate_traps_vhe();
@@ -380,6 +385,61 @@ static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
return true;
}
+static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
+{
+ u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
+ int rt = kvm_vcpu_sys_get_rt(vcpu);
+ u64 val = vcpu_get_reg(vcpu, rt);
+
+ /*
+ * The normal sysreg handling code expects to see the traps,
+ * let's not do anything here.
+ */
+ if (vcpu->arch.hcr_el2 & HCR_TVM)
+ return false;
+
+ switch (sysreg) {
+ case SYS_SCTLR_EL1:
+ write_sysreg_el1(val, SYS_SCTLR);
+ break;
+ case SYS_TTBR0_EL1:
+ write_sysreg_el1(val, SYS_TTBR0);
+ break;
+ case SYS_TTBR1_EL1:
+ write_sysreg_el1(val, SYS_TTBR1);
+ break;
+ case SYS_TCR_EL1:
+ write_sysreg_el1(val, SYS_TCR);
+ break;
+ case SYS_ESR_EL1:
+ write_sysreg_el1(val, SYS_ESR);
+ break;
+ case SYS_FAR_EL1:
+ write_sysreg_el1(val, SYS_FAR);
+ break;
+ case SYS_AFSR0_EL1:
+ write_sysreg_el1(val, SYS_AFSR0);
+ break;
+ case SYS_AFSR1_EL1:
+ write_sysreg_el1(val, SYS_AFSR1);
+ break;
+ case SYS_MAIR_EL1:
+ write_sysreg_el1(val, SYS_MAIR);
+ break;
+ case SYS_AMAIR_EL1:
+ write_sysreg_el1(val, SYS_AMAIR);
+ break;
+ case SYS_CONTEXTIDR_EL1:
+ write_sysreg_el1(val, SYS_CONTEXTIDR);
+ break;
+ default:
+ return false;
+ }
+
+ __kvm_skip_instr(vcpu);
+ return true;
+}
+
/*
* Return true when we were able to fixup the guest exit and should return to
* the guest, false when we should restore the host state and return to the
@@ -399,6 +459,11 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
if (*exit_code != ARM_EXCEPTION_TRAP)
goto exit;
+ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
+ kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
+ handle_tx2_tvm(vcpu))
+ return true;
+
/*
* We trap the first access to the FP/SIMD to save the host context
* and restore the guest context lazily.
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 115d7a0e4b08..9fc6db0bcbad 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -113,6 +113,15 @@ static inline bool is_ttbr1_addr(unsigned long addr)
return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
}
+static inline unsigned long mm_to_pgd_phys(struct mm_struct *mm)
+{
+ /* Either init_pg_dir or swapper_pg_dir */
+ if (mm == &init_mm)
+ return __pa_symbol(mm->pgd);
+
+ return (unsigned long)virt_to_phys(mm->pgd);
+}
+
/*
* Dump out the page tables associated with 'addr' in the currently active mm.
*/
@@ -141,7 +150,7 @@ static void show_pte(unsigned long addr)
pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n",
mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
- vabits_actual, (unsigned long)virt_to_phys(mm->pgd));
+ vabits_actual, mm_to_pgd_phys(mm));
pgdp = pgd_offset(mm, addr);
pgd = READ_ONCE(*pgdp);
pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
@@ -259,14 +268,18 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
par = read_sysreg(par_el1);
local_irq_restore(flags);
+ /*
+ * If we now have a valid translation, treat the translation fault as
+ * spurious.
+ */
if (!(par & SYS_PAR_EL1_F))
- return false;
+ return true;
/*
* If we got a different type of fault from the AT instruction,
* treat the translation fault as spurious.
*/
- dfsc = FIELD_PREP(SYS_PAR_EL1_FST, par);
+ dfsc = FIELD_GET(SYS_PAR_EL1_FST, par);
return (dfsc & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT;
}
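
Two fixes land here: SYS_PAR_EL1_F now names bit 0, which is where PAR_EL1 actually reports the fault bit, and the DFSC field is extracted with FIELD_GET() instead of FIELD_PREP(). The two helpers go in opposite directions, so the old code produced a meaningless value. A userspace restatement of the <linux/bitfield.h> semantics makes the difference concrete:

#include <assert.h>
#include <stdint.h>

#define GENMASK(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* Userspace restatements of the <linux/bitfield.h> helpers. */
#define FIELD_PREP(mask, val)	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((uint64_t)(reg) & (mask)) >> __builtin_ctzll(mask))

#define PAR_EL1_F	(1ULL << 0)	/* fault bit, bit 0 */
#define PAR_EL1_FST	GENMASK(6, 1)	/* fault status code */

int main(void)
{
	uint64_t par = PAR_EL1_F | FIELD_PREP(PAR_EL1_FST, 0x25);

	/* FIELD_GET pulls a field *out of* a register value... */
	assert(FIELD_GET(PAR_EL1_FST, par) == 0x25);
	/* ...while FIELD_PREP shifts a value *into* field position, so
	 * applying it to the read-back register yields garbage. */
	assert(FIELD_PREP(PAR_EL1_FST, par) != 0x25);
	return 0;
}
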
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 16bef819fe98..914af125a7fa 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -571,7 +571,6 @@ CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_EMI62=m
CONFIG_USB_EMI26=m
CONFIG_USB_ADUTUX=m
-CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
CONFIG_USB_CYPRESS_CY7C63=m
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 8762e75f5d5f..2c7adea7638f 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -314,7 +314,6 @@ CONFIG_USB_SERIAL_SAFE_PADDED=y
CONFIG_USB_SERIAL_CYBERJACK=m
CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
-CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
CONFIG_USB_CYTHERM=m
diff --git a/arch/mips/fw/sni/sniprom.c b/arch/mips/fw/sni/sniprom.c
index 8772617b64ce..80112f2298b6 100644
--- a/arch/mips/fw/sni/sniprom.c
+++ b/arch/mips/fw/sni/sniprom.c
@@ -43,7 +43,7 @@
/* O32 stack has to be 8-byte aligned. */
static u64 o32_stk[4096];
-#define O32_STK &o32_stk[sizeof(o32_stk)]
+#define O32_STK (&o32_stk[ARRAY_SIZE(o32_stk)])
#define __PROM_O32(fun, arg) fun arg __asm__(#fun); \
__asm__(#fun " = call_o32")
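
The O32_STK fix is a bytes-versus-elements mix-up: o32_stk is an array of u64, so indexing it by sizeof(o32_stk) points eight times past the end of the object rather than at the one-past-the-end element the downward-growing O32 stack should start from. A small demonstration with the same array shape:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static uint64_t o32_stk[4096];

int main(void)
{
	/* sizeof() counts bytes: 4096 u64s occupy 32768 bytes, so the
	 * old &o32_stk[sizeof(o32_stk)] landed 256 KiB into the address
	 * space of a 32 KiB object -- 8x past the end. */
	printf("bytes=%zu elements=%zu\n",
	       sizeof(o32_stk), ARRAY_SIZE(o32_stk));
	/* The valid one-past-the-end stack top: */
	printf("stack top at %p\n", (void *)&o32_stk[ARRAY_SIZE(o32_stk)]);
	return 0;
}
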
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 79bf34efbc04..f6136871561d 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -77,8 +77,8 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
extern unsigned long __xchg_small(volatile void *ptr, unsigned long val,
unsigned int size);
-static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
- int size)
+static __always_inline
+unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
{
switch (size) {
case 1:
@@ -153,8 +153,9 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
extern unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
unsigned long new, unsigned int size);
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long new, unsigned int size)
+static __always_inline
+unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size)
{
switch (size) {
case 1:
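
Promoting __xchg() and __cmpxchg() to __always_inline matters because their bad-size diagnostic is a link-time trick: the default branch calls an extern function that is deliberately never defined, and the build only succeeds if constant propagation of size deletes that branch. With plain inline the compiler may keep an out-of-line copy in which size is a runtime value, leaving the dead call reachable. A sketch of the pattern, assuming an optimising build as kernels always use:

/* Declared but never defined anywhere: a surviving call breaks the link. */
extern unsigned long xchg_called_with_bad_pointer(void);

static inline __attribute__((always_inline))
unsigned long my_xchg(volatile void *ptr, unsigned long x, int size)
{
	switch (size) {
	case 4:
		return __atomic_exchange_n((volatile unsigned int *)ptr,
					   (unsigned int)x, __ATOMIC_SEQ_CST);
	case 8:
		return __atomic_exchange_n((volatile unsigned long long *)ptr,
					   x, __ATOMIC_SEQ_CST);
	default:
		return xchg_called_with_bad_pointer();
	}
}

int main(void)
{
	unsigned long v = 1;

	/* "size" is a literal here; with -O2 the forced inline plus
	 * constant folding discard the default branch and the program
	 * links. Without guaranteed inlining, it may not. */
	return (int)my_xchg(&v, 2, sizeof(v));
}
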
diff --git a/arch/mips/include/uapi/asm/hwcap.h b/arch/mips/include/uapi/asm/hwcap.h
index a2aba4b059e6..1ade1daa4921 100644
--- a/arch/mips/include/uapi/asm/hwcap.h
+++ b/arch/mips/include/uapi/asm/hwcap.h
@@ -6,5 +6,16 @@
#define HWCAP_MIPS_R6 (1 << 0)
#define HWCAP_MIPS_MSA (1 << 1)
#define HWCAP_MIPS_CRC32 (1 << 2)
+#define HWCAP_MIPS_MIPS16 (1 << 3)
+#define HWCAP_MIPS_MDMX (1 << 4)
+#define HWCAP_MIPS_MIPS3D (1 << 5)
+#define HWCAP_MIPS_SMARTMIPS (1 << 6)
+#define HWCAP_MIPS_DSP (1 << 7)
+#define HWCAP_MIPS_DSP2 (1 << 8)
+#define HWCAP_MIPS_DSP3 (1 << 9)
+#define HWCAP_MIPS_MIPS16E2 (1 << 10)
+#define HWCAP_LOONGSON_MMI (1 << 11)
+#define HWCAP_LOONGSON_EXT (1 << 12)
+#define HWCAP_LOONGSON_EXT2 (1 << 13)
#endif /* _UAPI_ASM_HWCAP_H */
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index c2eb392597bf..f521cbf934e7 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -2180,6 +2180,39 @@ void cpu_probe(void)
elf_hwcap |= HWCAP_MIPS_MSA;
}
+ if (cpu_has_mips16)
+ elf_hwcap |= HWCAP_MIPS_MIPS16;
+
+ if (cpu_has_mdmx)
+ elf_hwcap |= HWCAP_MIPS_MDMX;
+
+ if (cpu_has_mips3d)
+ elf_hwcap |= HWCAP_MIPS_MIPS3D;
+
+ if (cpu_has_smartmips)
+ elf_hwcap |= HWCAP_MIPS_SMARTMIPS;
+
+ if (cpu_has_dsp)
+ elf_hwcap |= HWCAP_MIPS_DSP;
+
+ if (cpu_has_dsp2)
+ elf_hwcap |= HWCAP_MIPS_DSP2;
+
+ if (cpu_has_dsp3)
+ elf_hwcap |= HWCAP_MIPS_DSP3;
+
+ if (cpu_has_mips16e2)
+ elf_hwcap |= HWCAP_MIPS_MIPS16E2;
+
+ if (cpu_has_loongson_mmi)
+ elf_hwcap |= HWCAP_LOONGSON_MMI;
+
+ if (cpu_has_loongson_ext)
+ elf_hwcap |= HWCAP_LOONGSON_EXT;
+
+ if (cpu_has_loongson_ext2)
+ elf_hwcap |= HWCAP_LOONGSON_EXT2;
+
if (cpu_has_vz)
cpu_probe_vz(c);
diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform
index c1a4d4dc4665..9f79908f5063 100644
--- a/arch/mips/loongson64/Platform
+++ b/arch/mips/loongson64/Platform
@@ -66,6 +66,10 @@ else
$(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64)
endif
+# Some -march= flags enable MMI instructions, and GCC complains about that
+# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
+cflags-y += $(call cc-option,-mno-loongson-mmi)
+
#
# Loongson Machines' Support
#
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 807f0f782f75..996a934ece7d 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -15,6 +15,7 @@ ccflags-vdso := \
$(filter -mmicromips,$(KBUILD_CFLAGS)) \
$(filter -march=%,$(KBUILD_CFLAGS)) \
$(filter -m%-float,$(KBUILD_CFLAGS)) \
+ $(filter -mno-loongson-%,$(KBUILD_CFLAGS)) \
-D__VDSO__
ifdef CONFIG_CC_IS_CLANG
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index 73ca89a47f49..e5de3f897633 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -22,7 +22,7 @@
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
-#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_mostly __section(.data..read_mostly)
void parisc_cache_init(void); /* initializes cache-flushing */
void disable_sr_hashing_asm(int); /* low level support for above */
diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
index 3eb4bfc1fb36..e080143e79a3 100644
--- a/arch/parisc/include/asm/ldcw.h
+++ b/arch/parisc/include/asm/ldcw.h
@@ -52,7 +52,7 @@
})
#ifdef CONFIG_SMP
-# define __lock_aligned __attribute__((__section__(".data..lock_aligned")))
+# define __lock_aligned __section(.data..lock_aligned)
#endif
#endif /* __PARISC_LDCW_H */
diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
index 92a9b5f12f98..f29f682352f0 100644
--- a/arch/parisc/mm/ioremap.c
+++ b/arch/parisc/mm/ioremap.c
@@ -3,7 +3,7 @@
* arch/parisc/mm/ioremap.c
*
* (C) Copyright 1995 1996 Linus Torvalds
- * (C) Copyright 2001-2006 Helge Deller <deller@gmx.de>
+ * (C) Copyright 2001-2019 Helge Deller <deller@gmx.de>
* (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
*/
@@ -84,7 +84,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
addr = (void __iomem *) area->addr;
if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
phys_addr, pgprot)) {
- vfree(addr);
+ vunmap(addr);
return NULL;
}
@@ -92,9 +92,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
}
EXPORT_SYMBOL(__ioremap);
-void iounmap(const volatile void __iomem *addr)
+void iounmap(const volatile void __iomem *io_addr)
{
- if (addr > high_memory)
- return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
+ unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
+
+ if (is_vmalloc_addr((void *)addr))
+ vunmap((void *)addr);
}
EXPORT_SYMBOL(iounmap);
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 4ce795d30377..ca8db193ae38 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -35,6 +35,10 @@ static inline void radix__flush_all_lpid(unsigned int lpid)
{
WARN_ON(1);
}
+static inline void radix__flush_all_lpid_guest(unsigned int lpid)
+{
+ WARN_ON(1);
+}
#endif
extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma,
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 74a9cfe84aee..faebcbb8c4db 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1921,6 +1921,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mtspr SPRN_PCR, r6
18:
/* Signal secondary CPUs to continue */
+ li r0, 0
stb r0,VCORE_IN_GUEST(r5)
19: lis r8,0x7fff /* MAX_INT@h */
mtspr SPRN_HDEC,r8
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 1d93e55a2de1..2dd452a047cd 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -761,6 +761,7 @@ static int spufs_init_fs_context(struct fs_context *fc)
ctx->gid = current_gid();
ctx->mode = 0755;
+ fc->fs_private = ctx;
fc->s_fs_info = sbi;
fc->ops = &spufs_context_ops;
return 0;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index b53359258d99..f87a5c64e24d 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -1419,6 +1419,9 @@ void __init pseries_lpar_read_hblkrm_characteristics(void)
unsigned char local_buffer[SPLPAR_TLB_BIC_MAXLENGTH];
int call_status, len, idx, bpsize;
+ if (!firmware_has_feature(FW_FEATURE_BLOCK_REMOVE))
+ return;
+
spin_lock(&rtas_data_buf_lock);
memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
index 104d334511cd..88cfcb96bf23 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
@@ -13,6 +13,7 @@
compatible = "sifive,hifive-unleashed-a00", "sifive,fu540-c000";
chosen {
+ stdout-path = "serial0";
};
cpus {
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 7255f2d8395b..42292d99cc74 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -87,14 +87,6 @@ extern pgd_t swapper_pg_dir[];
#define VMALLOC_END (PAGE_OFFSET - 1)
#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
-#define FIXADDR_TOP VMALLOC_START
-#ifdef CONFIG_64BIT
-#define FIXADDR_SIZE PMD_SIZE
-#else
-#define FIXADDR_SIZE PGDIR_SIZE
-#endif
-#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
-
/*
* Roughly size the vmemmap space to be large enough to fit enough
* struct pages to map half the virtual address space. Then
@@ -108,6 +100,14 @@ extern pgd_t swapper_pg_dir[];
#define vmemmap ((struct page *)VMEMMAP_START)
+#define FIXADDR_TOP (VMEMMAP_START)
+#ifdef CONFIG_64BIT
+#define FIXADDR_SIZE PMD_SIZE
+#else
+#define FIXADDR_SIZE PGDIR_SIZE
+#endif
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
/*
* ZERO_PAGE is a global shared page that is always zero,
* used for zero-mapped memory areas, etc.
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 37ae4e367ad2..f02188a5b0f4 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -10,10 +10,6 @@
#include <linux/mm_types.h>
#include <asm/smp.h>
-/*
- * Flush entire local TLB. 'sfence.vma' implicitly fences with the instruction
- * cache as well, so a 'fence.i' is not necessary.
- */
static inline void local_flush_tlb_all(void)
{
__asm__ __volatile__ ("sfence.vma" : : : "memory");
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 2d592da1e776..8ca479831142 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -273,12 +273,11 @@ restore_all:
resume_kernel:
REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
bnez s0, restore_all
-need_resched:
REG_L s0, TASK_TI_FLAGS(tp)
andi s0, s0, _TIF_NEED_RESCHED
beqz s0, restore_all
call preempt_schedule_irq
- j need_resched
+ j restore_all
#endif
work_pending:
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 424eb72d56b1..1ac75f7d0bff 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -124,24 +124,24 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
asmlinkage void do_trap_break(struct pt_regs *regs)
{
+ if (user_mode(regs)) {
+ force_sig_fault(SIGTRAP, TRAP_BRKPT,
+ (void __user *)(regs->sepc));
+ return;
+ }
#ifdef CONFIG_GENERIC_BUG
- if (!user_mode(regs)) {
+ {
enum bug_trap_type type;
type = report_bug(regs->sepc, regs);
- switch (type) {
- case BUG_TRAP_TYPE_NONE:
- break;
- case BUG_TRAP_TYPE_WARN:
+ if (type == BUG_TRAP_TYPE_WARN) {
regs->sepc += get_break_insn_length(regs->sepc);
- break;
- case BUG_TRAP_TYPE_BUG:
- die(regs, "Kernel BUG");
+ return;
}
}
#endif /* CONFIG_GENERIC_BUG */
- force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)(regs->sepc));
+ die(regs, "Kernel BUG");
}
#ifdef CONFIG_GENERIC_BUG
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index bd2fd9a7821d..a470f1fa9f2a 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -83,7 +83,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n);
__rc; \
})
-static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
+static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
unsigned long spec = 0x010000UL;
int rc;
@@ -113,7 +113,7 @@ static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
return rc;
}
-static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
+static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
unsigned long spec = 0x01UL;
int rc;
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index fbc1aecf0f94..eb24cb1afc11 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -29,7 +29,6 @@ config SPARC
select RTC_DRV_M48T59
select RTC_SYSTOHC
select HAVE_ARCH_JUMP_LABEL if SPARC64
- select HAVE_FAST_GUP if SPARC64
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_PCI_IOMAP
diff --git a/arch/x86/boot/compressed/acpi.c b/arch/x86/boot/compressed/acpi.c
index 149795c369f2..25019d42ae93 100644
--- a/arch/x86/boot/compressed/acpi.c
+++ b/arch/x86/boot/compressed/acpi.c
@@ -21,30 +21,6 @@
struct mem_vector immovable_mem[MAX_NUMNODES*2];
/*
- * Max length of 64-bit hex address string is 19, prefix "0x" + 16 hex
- * digits, and '\0' for termination.
- */
-#define MAX_ADDR_LEN 19
-
-static acpi_physical_address get_cmdline_acpi_rsdp(void)
-{
- acpi_physical_address addr = 0;
-
-#ifdef CONFIG_KEXEC
- char val[MAX_ADDR_LEN] = { };
- int ret;
-
- ret = cmdline_find_option("acpi_rsdp", val, MAX_ADDR_LEN);
- if (ret < 0)
- return 0;
-
- if (kstrtoull(val, 16, &addr))
- return 0;
-#endif
- return addr;
-}
-
-/*
* Search EFI system tables for RSDP. If both ACPI_20_TABLE_GUID and
* ACPI_TABLE_GUID are found, take the former, which has more features.
*/
@@ -298,6 +274,30 @@ acpi_physical_address get_rsdp_addr(void)
}
#if defined(CONFIG_RANDOMIZE_BASE) && defined(CONFIG_MEMORY_HOTREMOVE)
+/*
+ * Max length of a 64-bit hex address string is 19: the "0x" prefix, 16 hex
+ * digits, and a '\0' terminator.
+ */
+#define MAX_ADDR_LEN 19
+
+static acpi_physical_address get_cmdline_acpi_rsdp(void)
+{
+ acpi_physical_address addr = 0;
+
+#ifdef CONFIG_KEXEC
+ char val[MAX_ADDR_LEN] = { };
+ int ret;
+
+ ret = cmdline_find_option("acpi_rsdp", val, MAX_ADDR_LEN);
+ if (ret < 0)
+ return 0;
+
+ if (kstrtoull(val, 16, &addr))
+ return 0;
+#endif
+ return addr;
+}
+
/* Compute SRAT address from RSDP. */
static unsigned long get_acpi_srat_table(void)
{
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 53ac0cb2396d..9652d5c2afda 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -345,6 +345,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
{
const unsigned long kernel_total_size = VO__end - VO__text;
unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
+ unsigned long needed_size;
/* Retain x86 boot parameters pointer passed from startup_32/64. */
boot_params = rmode;
@@ -379,26 +380,38 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
free_mem_ptr = heap; /* Heap */
free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
+ /*
+ * The memory hole needed for the kernel is the larger of either
+ * the entire decompressed kernel plus relocation table, or the
+ * entire decompressed kernel plus .bss and .brk sections.
+ *
+ * On X86_64, the memory is mapped with PMD pages. Round the
+ * size up so that the full extent of PMD pages mapped is
+ * included in the check against the valid memory table
+ * entries. This ensures the full mapped area is usable RAM
+ * and doesn't include any reserved areas.
+ */
+ needed_size = max(output_len, kernel_total_size);
+#ifdef CONFIG_X86_64
+ needed_size = ALIGN(needed_size, MIN_KERNEL_ALIGN);
+#endif
+
/* Report initial kernel position details. */
debug_putaddr(input_data);
debug_putaddr(input_len);
debug_putaddr(output);
debug_putaddr(output_len);
debug_putaddr(kernel_total_size);
+ debug_putaddr(needed_size);
#ifdef CONFIG_X86_64
/* Report address of 32-bit trampoline */
debug_putaddr(trampoline_32bit);
#endif
- /*
- * The memory hole needed for the kernel is the larger of either
- * the entire decompressed kernel plus relocation table, or the
- * entire decompressed kernel plus .bss and .brk sections.
- */
choose_random_location((unsigned long)input_data, input_len,
(unsigned long *)&output,
- max(output_len, kernel_total_size),
+ needed_size,
&virt_addr);
/* Validate memory location choices. */
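
ALIGN() here is the usual power-of-two round-up, so on x86-64 the KASLR placement is validated against the full PMD-mapped extent rather than just the byte-accurate kernel size. A minimal sketch of the helper's effect (the kernel's real macro goes through __ALIGN_KERNEL, but reduces to this for power-of-two alignments):

	/* round x up to the next multiple of a (a must be a power of two) */
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	/* e.g. with 2 MiB PMDs: ALIGN(0x1234001, 0x200000) == 0x1400000 */
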
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index e7d35f60d53f..64c3e70b0556 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -5,12 +5,14 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/jiffies.h>
#include <asm/apicdef.h>
#include <asm/nmi.h>
#include "../perf_event.h"
-static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
+static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp);
+static unsigned long perf_nmi_window;
static __initconst const u64 amd_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
@@ -641,11 +643,12 @@ static void amd_pmu_disable_event(struct perf_event *event)
* handler when multiple PMCs are active or PMC overflow while handling some
* other source of an NMI.
*
- * Attempt to mitigate this by using the number of active PMCs to determine
- * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
- * any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the
- * number of active PMCs or 2. The value of 2 is used in case an NMI does not
- * arrive at the LAPIC in time to be collapsed into an already pending NMI.
+ * Attempt to mitigate this by creating an NMI window within which any
+ * unhandled NMI is claimed. This keeps the window from being extended past
+ * the point where latent NMIs could still plausibly arrive. The per-CPU
+ * perf_nmi_tstamp is set to the window end time whenever perf handles a
+ * counter; an unhandled NMI is then claimed only if it arrives within that
+ * window.
*/
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
@@ -663,21 +666,19 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
handled = x86_pmu_handle_irq(regs);
/*
- * If a counter was handled, record the number of possible remaining
- * NMIs that can occur.
+ * If a counter was handled, record a timestamp such that un-handled
+ * NMIs will be claimed if arriving within that window.
*/
if (handled) {
- this_cpu_write(perf_nmi_counter,
- min_t(unsigned int, 2, active));
+ this_cpu_write(perf_nmi_tstamp,
+ jiffies + perf_nmi_window);
return handled;
}
- if (!this_cpu_read(perf_nmi_counter))
+ if (time_after(jiffies, this_cpu_read(perf_nmi_tstamp)))
return NMI_DONE;
- this_cpu_dec(perf_nmi_counter);
-
return NMI_HANDLED;
}
@@ -909,6 +910,9 @@ static int __init amd_core_pmu_init(void)
if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
return 0;
+	/* Avoid calculating the value each time in the NMI handler */
+ perf_nmi_window = msecs_to_jiffies(100);
+
switch (boot_cpu_data.x86) {
case 0x15:
pr_cont("Fam15h ");
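
A timestamp window works where the old counter scheme was fragile because time_after() stays correct across jiffies wraparound; claiming simply stops once jiffies passes perf_nmi_tstamp, i.e. 100 ms after the last genuinely handled counter. Simplified sketch of the comparison (the real include/linux/jiffies.h macro adds typechecks):

	/* true if a is after b, correct even when jiffies wraps */
	#define time_after(a, b)	((long)((b) - (a)) < 0)
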
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 27ee47a7be66..fcef678c3423 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4983,6 +4983,8 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_SKYLAKE:
case INTEL_FAM6_KABYLAKE_L:
case INTEL_FAM6_KABYLAKE:
+ case INTEL_FAM6_COMETLAKE_L:
+ case INTEL_FAM6_COMETLAKE:
x86_add_quirk(intel_pebs_isolation_quirk);
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
@@ -5031,6 +5033,8 @@ __init int intel_pmu_init(void)
/* fall through */
case INTEL_FAM6_ICELAKE_L:
case INTEL_FAM6_ICELAKE:
+ case INTEL_FAM6_TIGERLAKE_L:
+ case INTEL_FAM6_TIGERLAKE:
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 9f2f39003d96..e1daf4151e11 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -45,46 +45,49 @@
* MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
* perf code: 0x01
* Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,
- CNL
+ * CNL,KBL,CML
* Scope: Core
* MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
- * SKL,KNL,GLM,CNL
+ * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL
* Scope: Core
* MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
* perf code: 0x03
- * Available model: SNB,IVB,HSW,BDW,SKL,CNL
+ * Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
+ * ICL,TGL
* Scope: Core
* MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
* perf code: 0x00
- * Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL
+ * Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
+ * KBL,CML,ICL,TGL
* Scope: Package (physical package)
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
* perf code: 0x01
* Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
- * GLM,CNL
+ * GLM,CNL,KBL,CML,ICL,TGL
* Scope: Package (physical package)
* MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
- * SKL,KNL,GLM,CNL
+ * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL
* Scope: Package (physical package)
* MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
* perf code: 0x03
- * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL
+ * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
+ * KBL,CML,ICL,TGL
* Scope: Package (physical package)
* MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
* perf code: 0x04
- * Available model: HSW ULT,KBL,CNL
+ * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL
* Scope: Package (physical package)
* MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
* perf code: 0x05
- * Available model: HSW ULT,KBL,CNL
+ * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL
* Scope: Package (physical package)
* MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
* perf code: 0x06
- * Available model: HSW ULT,KBL,GLM,CNL
+ * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL
* Scope: Package (physical package)
*
*/
@@ -544,6 +547,19 @@ static const struct cstate_model cnl_cstates __initconst = {
BIT(PERF_CSTATE_PKG_C10_RES),
};
+static const struct cstate_model icl_cstates __initconst = {
+ .core_events = BIT(PERF_CSTATE_CORE_C6_RES) |
+ BIT(PERF_CSTATE_CORE_C7_RES),
+
+ .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
+ BIT(PERF_CSTATE_PKG_C3_RES) |
+ BIT(PERF_CSTATE_PKG_C6_RES) |
+ BIT(PERF_CSTATE_PKG_C7_RES) |
+ BIT(PERF_CSTATE_PKG_C8_RES) |
+ BIT(PERF_CSTATE_PKG_C9_RES) |
+ BIT(PERF_CSTATE_PKG_C10_RES),
+};
+
static const struct cstate_model slm_cstates __initconst = {
.core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
BIT(PERF_CSTATE_CORE_C6_RES),
@@ -614,6 +630,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_L, hswult_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE, hswult_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_COMETLAKE_L, hswult_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_COMETLAKE, hswult_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_L, cnl_cstates),
@@ -625,8 +643,10 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
- X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_L, snb_cstates),
- X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE, snb_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_L, icl_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE, icl_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_TIGERLAKE_L, icl_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_TIGERLAKE, icl_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index b1afc77f0704..6f86650b3f77 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -89,7 +89,14 @@ static bool test_intel(int idx, void *data)
case INTEL_FAM6_SKYLAKE_X:
case INTEL_FAM6_KABYLAKE_L:
case INTEL_FAM6_KABYLAKE:
+ case INTEL_FAM6_COMETLAKE_L:
+ case INTEL_FAM6_COMETLAKE:
case INTEL_FAM6_ICELAKE_L:
+ case INTEL_FAM6_ICELAKE:
+ case INTEL_FAM6_ICELAKE_X:
+ case INTEL_FAM6_ICELAKE_D:
+ case INTEL_FAM6_TIGERLAKE_L:
+ case INTEL_FAM6_TIGERLAKE:
if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
return true;
break;
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 5c056b8aebef..e01078e93dd3 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -260,11 +260,21 @@ void __init hv_apic_init(void)
}
if (ms_hyperv.hints & HV_X64_APIC_ACCESS_RECOMMENDED) {
- pr_info("Hyper-V: Using MSR based APIC access\n");
+		pr_info("Hyper-V: Using enlightened APIC (%s mode)\n",
+ x2apic_enabled() ? "x2apic" : "xapic");
+ /*
+ * With x2apic, architectural x2apic MSRs are equivalent to the
+ * respective synthetic MSRs, so there's no need to override
+ * the apic accessors. The only exception is
+ * hv_apic_eoi_write, because it benefits from lazy EOI when
+ * available, but it works for both xapic and x2apic modes.
+ */
apic_set_eoi_write(hv_apic_eoi_write);
- apic->read = hv_apic_read;
- apic->write = hv_apic_write;
- apic->icr_write = hv_apic_icr_write;
- apic->icr_read = hv_apic_icr_read;
+ if (!x2apic_enabled()) {
+ apic->read = hv_apic_read;
+ apic->write = hv_apic_write;
+ apic->icr_write = hv_apic_icr_write;
+ apic->icr_read = hv_apic_icr_read;
+ }
}
}
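
hv_apic_eoi_write() stays overridden in both modes because the VP assist page lets the guest elide the EOI MSR write entirely when the hypervisor has nothing further to deliver. A from-memory sketch of the existing helper (not part of this hunk):

	static void hv_apic_eoi_write(u32 reg, u32 val)
	{
		struct hv_vp_assist_page *hvp = hv_vp_assist_page[smp_processor_id()];

		/* lazy EOI: the hypervisor already acknowledged the interrupt */
		if (hvp && (xchg(&hvp->apic_assist, 0) & 0x1))
			return;

		wrmsr(HV_X64_MSR_EOI, val, 0);
	}
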
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
index cff3f3f3bfe0..8348f7d69fd5 100644
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CPU_ENTRY_AREA_H
#define _ASM_X86_CPU_ENTRY_AREA_H
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index f04622500da3..c606c0b70738 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -83,6 +83,9 @@
#define INTEL_FAM6_TIGERLAKE_L 0x8C
#define INTEL_FAM6_TIGERLAKE 0x8D
+#define INTEL_FAM6_COMETLAKE 0xA5
+#define INTEL_FAM6_COMETLAKE_L 0xA6
+
/* "Small Core" Processors (Atom) */
#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index e28f8b723b5c..9d5252c9685c 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -21,7 +21,7 @@
#define MWAIT_ECX_INTERRUPT_BREAK 0x1
#define MWAITX_ECX_TIMER_ENABLE BIT(1)
#define MWAITX_MAX_LOOPS ((u32)-1)
-#define MWAITX_DISABLE_CSTATES 0xf
+#define MWAITX_DISABLE_CSTATES 0xf0
static inline void __monitor(const void *eax, unsigned long ecx,
unsigned long edx)
diff --git a/arch/x86/include/asm/pti.h b/arch/x86/include/asm/pti.h
index 5df09a0b80b8..07375b476c4f 100644
--- a/arch/x86/include/asm/pti.h
+++ b/arch/x86/include/asm/pti.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PTI_H
#define _ASM_X86_PTI_H
#ifndef __ASSEMBLY__
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 35c225ede0e4..61d93f062a36 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -734,5 +734,28 @@ do { \
if (unlikely(__gu_err)) goto err_label; \
} while (0)
+/*
+ * We want the unsafe accessors to always be inlined and use
+ * the error labels - thus the macro games.
+ */
+#define unsafe_copy_loop(dst, src, len, type, label) \
+ while (len >= sizeof(type)) { \
+ unsafe_put_user(*(type *)src,(type __user *)dst,label); \
+ dst += sizeof(type); \
+ src += sizeof(type); \
+ len -= sizeof(type); \
+ }
+
+#define unsafe_copy_to_user(_dst,_src,_len,label) \
+do { \
+ char __user *__ucu_dst = (_dst); \
+ const char *__ucu_src = (_src); \
+ size_t __ucu_len = (_len); \
+ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label); \
+ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label); \
+ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label); \
+ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \
+} while (0)
+
#endif /* _ASM_X86_UACCESS_H */
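
The intended caller opens a single user-access window around the whole copy so SMAP is toggled once rather than per word, and any fault jumps to the supplied label. A hedged sketch loosely modeled on the readdir-style users this was added for (the function and its names are illustrative):

	/* illustrative only: copy len bytes to user memory inside one window */
	static int copy_name_to_user(char __user *dst, const char *src, size_t len)
	{
		if (!user_access_begin(dst, len))
			return -EFAULT;
		unsafe_copy_to_user(dst, src, len, efault);
		user_access_end();
		return 0;

	efault:
		user_access_end();
		return -EFAULT;
	}
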
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 45e92cba92f5..b0889c48a2ac 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -156,7 +156,8 @@ static int x2apic_dead_cpu(unsigned int dead_cpu)
{
struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);
- cpumask_clear_cpu(dead_cpu, &cmsk->mask);
+ if (cmsk)
+ cpumask_clear_cpu(dead_cpu, &cmsk->mask);
free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
return 0;
}
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 267daad8c036..c656d92cd708 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -216,6 +216,10 @@ static void __init ms_hyperv_init_platform(void)
int hv_host_info_ecx;
int hv_host_info_edx;
+#ifdef CONFIG_PARAVIRT
+ pv_info.name = "Hyper-V";
+#endif
+
/*
* Extract the features and hints
*/
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 9735139cfdf8..46d732696c1c 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -49,7 +49,7 @@
#define VMWARE_CMD_VCPU_RESERVED 31
#define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \
- __asm__("inl (%%dx)" : \
+ __asm__("inl (%%dx), %%eax" : \
"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \
"a"(VMWARE_HYPERVISOR_MAGIC), \
"c"(VMWARE_CMD_##cmd), \
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 29ffa495bd1c..206a4b6144c2 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -222,13 +222,31 @@ unsigned long __head __startup_64(unsigned long physaddr,
* we might write invalid pmds, when the kernel is relocated
* cleanup_highmap() fixes this up along with the mappings
* beyond _end.
+ *
+ * Only the region occupied by the kernel image has so far
+ * been checked against the table of usable memory regions
+ * provided by the firmware, so invalidate pages outside that
+ * region. A page table entry that maps to a reserved area of
+ * memory would allow processor speculation into that area,
+ * and on some hardware (particularly the UV platform) even
+ * speculative access to some reserved areas is caught as an
+ * error, causing the BIOS to halt the system.
*/
pmd = fixup_pointer(level2_kernel_pgt, physaddr);
- for (i = 0; i < PTRS_PER_PMD; i++) {
+
+ /* invalidate pages before the kernel image */
+ for (i = 0; i < pmd_index((unsigned long)_text); i++)
+ pmd[i] &= ~_PAGE_PRESENT;
+
+ /* fixup pages that are part of the kernel image */
+ for (; i <= pmd_index((unsigned long)_end); i++)
if (pmd[i] & _PAGE_PRESENT)
pmd[i] += load_delta;
- }
+
+ /* invalidate pages after the kernel image */
+ for (; i < PTRS_PER_PMD; i++)
+ pmd[i] &= ~_PAGE_PRESENT;
/*
* Fixup phys_base - remove the memory encryption mask to obtain
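
pmd_index() picks the PMD slot covering a virtual address, so the three loops above partition level2_kernel_pgt's 512 entries into before-image, image, and after-image ranges, clearing _PAGE_PRESENT everywhere outside the image. For reference, the generic definition:

	/* index of the PMD entry mapping 'address' within its page table */
	#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
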
diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h
index 320ab978fb1f..1d0797b2338a 100644
--- a/arch/x86/kernel/process.h
+++ b/arch/x86/kernel/process.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
//
// Code shared between 32 and 64 bit
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index b7375dc6898f..c126571e5e2e 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -113,8 +113,8 @@ static void delay_mwaitx(unsigned long __loops)
__monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
/*
- * AMD, like Intel, supports the EAX hint and EAX=0xf
- * means, do not enter any deep C-state and we use it
+	 * AMD, like Intel's MWAIT, supports the EAX hint. EAX=0xf0 means
+	 * "do not enter any deep C-state", and we use it
* here in delay() to minimize wakeup latency.
*/
__mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE);
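
The 0xf to 0xf0 change encodes that MWAITX takes its C-state hint in EAX[7:4], not in EAX[3:0] as plain MWAIT does, so the old value never actually constrained C-states. A sketch of the layout implied by the fix (my reading of the change, hedged):

	/*
	 * MWAITX EAX hint (per the fix above):
	 *   EAX[7:4] = 0xf  ->  "do not enter any deep C-state"
	 *   EAX[3:0]        ->  not the hint field, unlike MWAIT
	 * hence MWAITX_DISABLE_CSTATES = 0xf << 4 = 0xf0.
	 */
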
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index c202e1b07e29..425e025341db 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -917,9 +917,6 @@ static void __init kexec_enter_virtual_mode(void)
if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
runtime_code_page_mkexec();
-
- /* clean DUMMY object */
- efi_delete_dummy_variable();
#endif
}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 750f46ad018a..205b1176084f 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -269,19 +269,41 @@ void xen_reboot(int reason)
BUG();
}
+static int reboot_reason = SHUTDOWN_reboot;
+static bool xen_legacy_crash;
void xen_emergency_restart(void)
{
- xen_reboot(SHUTDOWN_reboot);
+ xen_reboot(reboot_reason);
}
static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
- if (!kexec_crash_loaded())
- xen_reboot(SHUTDOWN_crash);
+ if (!kexec_crash_loaded()) {
+ if (xen_legacy_crash)
+ xen_reboot(SHUTDOWN_crash);
+
+ reboot_reason = SHUTDOWN_crash;
+
+ /*
+ * If panic_timeout==0 then we are supposed to wait forever.
+ * However, to preserve original dom0 behavior we have to drop
+ * into hypervisor. (domU behavior is controlled by its
+ * config file)
+ */
+ if (panic_timeout == 0)
+ panic_timeout = -1;
+ }
return NOTIFY_DONE;
}
+static int __init parse_xen_legacy_crash(char *arg)
+{
+ xen_legacy_crash = true;
+ return 0;
+}
+early_param("xen_legacy_crash", parse_xen_legacy_crash);
+
static struct notifier_block xen_panic_block = {
.notifier_call = xen_panic_event,
.priority = INT_MIN
diff --git a/arch/xtensa/boot/dts/virt.dts b/arch/xtensa/boot/dts/virt.dts
index a9dcd87b6eb1..611b98a02a65 100644
--- a/arch/xtensa/boot/dts/virt.dts
+++ b/arch/xtensa/boot/dts/virt.dts
@@ -56,7 +56,7 @@
reg = <0xf0100000 0x03f00000>;
// BUS_ADDRESS(3) CPU_PHYSICAL(1) SIZE(2)
- ranges = <0x01000000 0x0 0xf0000000 0xf0000000 0x0 0x00010000>,
+ ranges = <0x01000000 0x0 0x00000000 0xf0000000 0x0 0x00010000>,
<0x02000000 0x0 0xf4000000 0xf4000000 0x0 0x08000000>;
// PCI_DEVICE(3) INT#(1) CONTROLLER(PHANDLE) CONTROLLER_DATA(2)
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index aeb15f4c755b..be8b2be5a98b 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -148,7 +148,7 @@ static inline void change_bit(unsigned int bit, volatile unsigned long *p)
" getex %0\n"
" beqz %0, 1b\n"
: "=&a" (tmp)
- : "a" (~mask), "a" (p)
+ : "a" (mask), "a" (p)
: "memory");
}
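
The inverted mask looks like a slip carried over from the clear_bit() variant: an XOR toggle wants the mask itself, and only an AND-clear wants its complement. In plain C terms:

	*p ^= mask;	/* change_bit: toggle exactly the selected bit */
	*p &= ~mask;	/* clear_bit: the one case that wants ~mask    */
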
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 6792928ba84a..3f80386f1883 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -100,7 +100,7 @@ do { \
case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \
case 8: { \
__typeof__(*ptr) __v64 = x; \
- retval = __copy_to_user(ptr, &__v64, 8); \
+ retval = __copy_to_user(ptr, &__v64, 8) ? -EFAULT : 0; \
break; \
} \
default: __put_user_bad(); \
@@ -132,14 +132,14 @@ do { \
#define __check_align_1 ""
#define __check_align_2 \
- " _bbci.l %3, 0, 1f \n" \
- " movi %0, %4 \n" \
+ " _bbci.l %[addr], 0, 1f \n" \
+ " movi %[err], %[efault] \n" \
" _j 2f \n"
#define __check_align_4 \
- " _bbsi.l %3, 0, 0f \n" \
- " _bbci.l %3, 1, 1f \n" \
- "0: movi %0, %4 \n" \
+ " _bbsi.l %[addr], 0, 0f \n" \
+ " _bbci.l %[addr], 1, 1f \n" \
+ "0: movi %[err], %[efault] \n" \
" _j 2f \n"
@@ -151,40 +151,40 @@ do { \
* WARNING: If you modify this macro at all, verify that the
* __check_align_* macros still work.
*/
-#define __put_user_asm(x, addr, err, align, insn, cb) \
+#define __put_user_asm(x_, addr_, err_, align, insn, cb)\
__asm__ __volatile__( \
__check_align_##align \
- "1: "insn" %2, %3, 0 \n" \
+ "1: "insn" %[x], %[addr], 0 \n" \
"2: \n" \
" .section .fixup,\"ax\" \n" \
" .align 4 \n" \
" .literal_position \n" \
"5: \n" \
- " movi %1, 2b \n" \
- " movi %0, %4 \n" \
- " jx %1 \n" \
+ " movi %[tmp], 2b \n" \
+ " movi %[err], %[efault] \n" \
+ " jx %[tmp] \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" .long 1b, 5b \n" \
" .previous" \
- :"=r" (err), "=r" (cb) \
- :"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))
+ :[err] "+r"(err_), [tmp] "=r"(cb) \
+ :[x] "r"(x_), [addr] "r"(addr_), [efault] "i"(-EFAULT))
#define __get_user_nocheck(x, ptr, size) \
({ \
- long __gu_err, __gu_val; \
- __get_user_size(__gu_val, (ptr), (size), __gu_err); \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ long __gu_err; \
+ __get_user_size((x), (ptr), (size), __gu_err); \
__gu_err; \
})
#define __get_user_check(x, ptr, size) \
({ \
- long __gu_err = -EFAULT, __gu_val = 0; \
+ long __gu_err = -EFAULT; \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
- if (access_ok(__gu_addr, size)) \
- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ if (access_ok(__gu_addr, size)) \
+ __get_user_size((x), __gu_addr, (size), __gu_err); \
+ else \
+ (x) = 0; \
__gu_err; \
})
@@ -198,8 +198,17 @@ do { \
case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\
case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\
- case 8: retval = __copy_from_user(&x, ptr, 8); break; \
- default: (x) = __get_user_bad(); \
+ case 8: { \
+ u64 __x; \
+ if (unlikely(__copy_from_user(&__x, ptr, 8))) { \
+ retval = -EFAULT; \
+ (x) = 0; \
+ } else { \
+ (x) = *(__force __typeof__((ptr)))&__x; \
+ } \
+ break; \
+ } \
+ default: (x) = 0; __get_user_bad(); \
} \
} while (0)
@@ -208,25 +217,28 @@ do { \
* WARNING: If you modify this macro at all, verify that the
* __check_align_* macros still work.
*/
-#define __get_user_asm(x, addr, err, align, insn, cb) \
-__asm__ __volatile__( \
- __check_align_##align \
- "1: "insn" %2, %3, 0 \n" \
- "2: \n" \
- " .section .fixup,\"ax\" \n" \
- " .align 4 \n" \
- " .literal_position \n" \
- "5: \n" \
- " movi %1, 2b \n" \
- " movi %2, 0 \n" \
- " movi %0, %4 \n" \
- " jx %1 \n" \
- " .previous \n" \
- " .section __ex_table,\"a\" \n" \
- " .long 1b, 5b \n" \
- " .previous" \
- :"=r" (err), "=r" (cb), "=r" (x) \
- :"r" (addr), "i" (-EFAULT), "0" (err))
+#define __get_user_asm(x_, addr_, err_, align, insn, cb) \
+do { \
+ u32 __x = 0; \
+ __asm__ __volatile__( \
+ __check_align_##align \
+ "1: "insn" %[x], %[addr], 0 \n" \
+ "2: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ " .literal_position \n" \
+ "5: \n" \
+ " movi %[tmp], 2b \n" \
+ " movi %[err], %[efault] \n" \
+ " jx %[tmp] \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .long 1b, 5b \n" \
+ " .previous" \
+ :[err] "+r"(err_), [tmp] "=r"(cb), [x] "+r"(__x) \
+ :[addr] "r"(addr_), [efault] "i"(-EFAULT)); \
+ (x_) = (__force __typeof__(*(addr_)))__x; \
+} while (0)
/*
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index 04f19de46700..4092555828b1 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -119,13 +119,6 @@ EXPORT_SYMBOL(__invalidate_icache_range);
// FIXME EXPORT_SYMBOL(screen_info);
#endif
-EXPORT_SYMBOL(outsb);
-EXPORT_SYMBOL(outsw);
-EXPORT_SYMBOL(outsl);
-EXPORT_SYMBOL(insb);
-EXPORT_SYMBOL(insw);
-EXPORT_SYMBOL(insl);
-
extern long common_exception_return;
EXPORT_SYMBOL(common_exception_return);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b6f20be0fc78..5d21027b1faf 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1362,7 +1362,7 @@ int blkcg_activate_policy(struct request_queue *q,
const struct blkcg_policy *pol)
{
struct blkg_policy_data *pd_prealloc = NULL;
- struct blkcg_gq *blkg;
+ struct blkcg_gq *blkg, *pinned_blkg = NULL;
int ret;
if (blkcg_policy_enabled(q, pol))
@@ -1370,49 +1370,82 @@ int blkcg_activate_policy(struct request_queue *q,
if (queue_is_mq(q))
blk_mq_freeze_queue(q);
-pd_prealloc:
- if (!pd_prealloc) {
- pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q, &blkcg_root);
- if (!pd_prealloc) {
- ret = -ENOMEM;
- goto out_bypass_end;
- }
- }
-
+retry:
spin_lock_irq(&q->queue_lock);
- /* blkg_list is pushed at the head, reverse walk to init parents first */
+ /* blkg_list is pushed at the head, reverse walk to allocate parents first */
list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
struct blkg_policy_data *pd;
if (blkg->pd[pol->plid])
continue;
- pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q, &blkcg_root);
- if (!pd)
- swap(pd, pd_prealloc);
+ /* If prealloc matches, use it; otherwise try GFP_NOWAIT */
+ if (blkg == pinned_blkg) {
+ pd = pd_prealloc;
+ pd_prealloc = NULL;
+ } else {
+ pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q,
+ blkg->blkcg);
+ }
+
if (!pd) {
+ /*
+ * GFP_NOWAIT failed. Free the existing one and
+ * prealloc for @blkg w/ GFP_KERNEL.
+ */
+ if (pinned_blkg)
+ blkg_put(pinned_blkg);
+ blkg_get(blkg);
+ pinned_blkg = blkg;
+
spin_unlock_irq(&q->queue_lock);
- goto pd_prealloc;
+
+ if (pd_prealloc)
+ pol->pd_free_fn(pd_prealloc);
+ pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q,
+ blkg->blkcg);
+ if (pd_prealloc)
+ goto retry;
+ else
+ goto enomem;
}
blkg->pd[pol->plid] = pd;
pd->blkg = blkg;
pd->plid = pol->plid;
- if (pol->pd_init_fn)
- pol->pd_init_fn(pd);
}
+ /* all allocated, init in the same order */
+ if (pol->pd_init_fn)
+ list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
+ pol->pd_init_fn(blkg->pd[pol->plid]);
+
__set_bit(pol->plid, q->blkcg_pols);
ret = 0;
spin_unlock_irq(&q->queue_lock);
-out_bypass_end:
+out:
if (queue_is_mq(q))
blk_mq_unfreeze_queue(q);
+ if (pinned_blkg)
+ blkg_put(pinned_blkg);
if (pd_prealloc)
pol->pd_free_fn(pd_prealloc);
return ret;
+
+enomem:
+ /* alloc failed, nothing's initialized yet, free everything */
+ spin_lock_irq(&q->queue_lock);
+ list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ if (blkg->pd[pol->plid]) {
+ pol->pd_free_fn(blkg->pd[pol->plid]);
+ blkg->pd[pol->plid] = NULL;
+ }
+ }
+ spin_unlock_irq(&q->queue_lock);
+ ret = -ENOMEM;
+ goto out;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
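
The retry structure is the standard pattern for allocating while holding a spinlock: try GFP_NOWAIT in place and, on failure, drop the lock, preallocate with GFP_KERNEL for the specific element that failed (pinned so it cannot disappear), then start over. Stripped of the blkcg specifics (alloc_pd, item, and pinned are illustrative names):

	retry:
		spin_lock_irq(&lock);
		list_for_each_entry(item, &list, node) {
			if (item->pd)
				continue;		/* done on an earlier pass */
			if (item == pinned) {
				pd = prealloc;		/* GFP_KERNEL one from last round */
				prealloc = NULL;
			} else {
				pd = alloc_pd(GFP_NOWAIT | __GFP_NOWARN);
			}
			if (!pd) {
				pinned = item;		/* remember who needs GFP_KERNEL */
				spin_unlock_irq(&lock);
				prealloc = alloc_pd(GFP_KERNEL);	/* may sleep */
				if (!prealloc)
					goto enomem;
				goto retry;
			}
			item->pd = pd;
		}
		spin_unlock_irq(&lock);
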
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index 61b635bc2a31..656460636ad3 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -160,24 +160,27 @@ bool rq_depth_calc_max_depth(struct rq_depth *rqd)
return ret;
}
-void rq_depth_scale_up(struct rq_depth *rqd)
+/* Returns true on success and false if scaling up wasn't possible */
+bool rq_depth_scale_up(struct rq_depth *rqd)
{
/*
* Hit max in previous round, stop here
*/
if (rqd->scaled_max)
- return;
+ return false;
rqd->scale_step--;
rqd->scaled_max = rq_depth_calc_max_depth(rqd);
+ return true;
}
/*
* Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
- * had a latency violation.
+ * had a latency violation. Returns true on success and returns false if
+ * scaling down wasn't possible.
*/
-void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
+bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
{
/*
* Stop scaling down when we've hit the limit. This also prevents
@@ -185,7 +188,7 @@ void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
* keep up.
*/
if (rqd->max_depth == 1)
- return;
+ return false;
if (rqd->scale_step < 0 && hard_throttle)
rqd->scale_step = 0;
@@ -194,6 +197,7 @@ void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
rqd->scaled_max = false;
rq_depth_calc_max_depth(rqd);
+ return true;
}
struct rq_qos_wait_data {
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index 08a09dbe0f4b..2bc43e94f4c4 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -108,16 +108,13 @@ static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
- struct rq_qos *cur, *prev = NULL;
- for (cur = q->rq_qos; cur; cur = cur->next) {
- if (cur == rqos) {
- if (prev)
- prev->next = rqos->next;
- else
- q->rq_qos = cur;
+ struct rq_qos **cur;
+
+ for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
+ if (*cur == rqos) {
+ *cur = rqos->next;
break;
}
- prev = cur;
}
blk_mq_debugfs_unregister_rqos(rqos);
@@ -130,8 +127,8 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
acquire_inflight_cb_t *acquire_inflight_cb,
cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
-void rq_depth_scale_up(struct rq_depth *rqd);
-void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
+bool rq_depth_scale_up(struct rq_depth *rqd);
+bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
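
Beyond being shorter, the pointer-to-pointer walk in rq_qos_del() fixes a latent bug visible in the removed lines: when the victim was the list head, the old code assigned q->rq_qos = cur, and since cur == rqos that was a no-op which left the dead element linked. The indirect-pointer idiom has no head special case to get wrong:

	/* generic singly-linked delete, no head/non-head distinction */
	struct node **pp;

	for (pp = &head; *pp; pp = &(*pp)->next) {
		if (*pp == victim) {
			*pp = victim->next;
			break;
		}
	}
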
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 8af553a0ba00..8641ba9793c5 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -308,7 +308,8 @@ static void calc_wb_limits(struct rq_wb *rwb)
static void scale_up(struct rq_wb *rwb)
{
- rq_depth_scale_up(&rwb->rq_depth);
+ if (!rq_depth_scale_up(&rwb->rq_depth))
+ return;
calc_wb_limits(rwb);
rwb->unknown_cnt = 0;
rwb_wake_all(rwb);
@@ -317,7 +318,8 @@ static void scale_up(struct rq_wb *rwb)
static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
- rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
+ if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
+ return;
calc_wb_limits(rwb);
rwb->unknown_cnt = 0;
rwb_trace_step(rwb, "scale down");
diff --git a/block/elevator.c b/block/elevator.c
index 5437059c9261..076ba7308e65 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -616,7 +616,8 @@ out:
static inline bool elv_support_iosched(struct request_queue *q)
{
- if (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
+ if (!q->mq_ops ||
+ (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
return false;
return true;
}
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 3b2525908dd8..a1a858ad4d18 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -905,8 +905,8 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
pcc_data[pcc_ss_id]->refcount--;
if (!pcc_data[pcc_ss_id]->refcount) {
pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
- pcc_data[pcc_ss_id]->pcc_channel_acquired = 0;
kfree(pcc_data[pcc_ss_id]);
+ pcc_data[pcc_ss_id] = NULL;
}
}
}
diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/hmat/hmat.c
index 8f9a28a870b0..8b0de8a3c647 100644
--- a/drivers/acpi/hmat/hmat.c
+++ b/drivers/acpi/hmat/hmat.c
@@ -403,7 +403,7 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n",
p->flags, p->processor_PD, p->memory_PD);
- if (p->flags & ACPI_HMAT_MEMORY_PD_VALID) {
+ if (p->flags & ACPI_HMAT_MEMORY_PD_VALID && hmat_revision == 1) {
target = find_mem_target(p->memory_PD);
if (!target) {
pr_debug("HMAT: Memory Domain missing from SRAT\n");
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 2261713d1aec..930a49fa4dfc 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -162,21 +162,23 @@ void acpi_processor_ppc_init(int cpu)
struct acpi_processor *pr = per_cpu(processors, cpu);
int ret;
+ if (!pr)
+ return;
+
ret = dev_pm_qos_add_request(get_cpu_device(cpu),
&pr->perflib_req, DEV_PM_QOS_MAX_FREQUENCY,
INT_MAX);
- if (ret < 0) {
+ if (ret < 0)
pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
ret);
- return;
- }
}
void acpi_processor_ppc_exit(int cpu)
{
struct acpi_processor *pr = per_cpu(processors, cpu);
- dev_pm_qos_remove_request(&pr->perflib_req);
+ if (pr)
+ dev_pm_qos_remove_request(&pr->perflib_req);
}
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index ec2638f1df4f..8227c7dd75b1 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -130,21 +130,23 @@ void acpi_thermal_cpufreq_init(int cpu)
struct acpi_processor *pr = per_cpu(processors, cpu);
int ret;
+ if (!pr)
+ return;
+
ret = dev_pm_qos_add_request(get_cpu_device(cpu),
&pr->thermal_req, DEV_PM_QOS_MAX_FREQUENCY,
INT_MAX);
- if (ret < 0) {
+ if (ret < 0)
pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
ret);
- return;
- }
}
void acpi_thermal_cpufreq_exit(int cpu)
{
struct acpi_processor *pr = per_cpu(processors, cpu);
- dev_pm_qos_remove_request(&pr->thermal_req);
+ if (pr)
+ dev_pm_qos_remove_request(&pr->thermal_req);
}
#else /* ! CONFIG_CPU_FREQ */
static int cpufreq_get_max_state(unsigned int cpu)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 9fa77d72ef27..2af937a8b1c5 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -362,19 +362,6 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
},
},
/*
- * https://bugzilla.kernel.org/show_bug.cgi?id=196907
- * Some Dell XPS13 9360 cannot do suspend-to-idle using the Low Power
- * S0 Idle firmware interface.
- */
- {
- .callback = init_default_s3,
- .ident = "Dell XPS13 9360",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
- },
- },
- /*
* ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
* the Low Power S0 Idle firmware interface (see
* https://bugzilla.kernel.org/show_bug.cgi?id=199057).
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index c0a491277aca..5b9ac2122e89 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -57,6 +57,7 @@
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
+#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
@@ -66,6 +67,7 @@
#include <linux/task_work.h>
#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
#include <asm/cacheflush.h>
@@ -2876,7 +2878,7 @@ static void binder_transaction(struct binder_proc *proc,
e->target_handle = tr->target.handle;
e->data_size = tr->data_size;
e->offsets_size = tr->offsets_size;
- e->context_name = proc->context->name;
+ strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
if (reply) {
binder_inner_proc_lock(proc);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 6d79a1b0d446..d42a8b2f636a 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -156,7 +156,7 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
}
/**
- * binder_alloc_buffer_lookup() - get buffer given user ptr
+ * binder_alloc_prepare_to_free() - get buffer given user ptr
* @alloc: binder_alloc for this proc
* @user_ptr: User pointer to buffer data
*
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index bd47f7f72075..ae991097d14d 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -130,7 +130,7 @@ struct binder_transaction_log_entry {
int return_error_line;
uint32_t return_error;
uint32_t return_error_param;
- const char *context_name;
+ char context_name[BINDERFS_MAX_NAME + 1];
};
struct binder_transaction_log {
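
Storing the name inline means a transaction log entry can now outlive the binderfs context that produced it, where the old const char * could dangle after device removal. strscpy() is the right copier here: unlike strncpy() it always NUL-terminates and reports truncation. Its contract, for reference:

	/* copies at most count - 1 bytes and always NUL-terminates;
	 * returns the number of bytes copied, or -E2BIG on truncation */
	ssize_t strscpy(char *dest, const char *src, size_t count);
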
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index dd92faf197d5..05c2b32dcc4d 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1600,7 +1600,9 @@ static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hp
*/
if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
return;
- if (((enum board_ids) id->driver_data) < board_ahci_pcs7)
+
+ /* Skip applying the quirk on Denverton and beyond */
+ if (((enum board_ids) id->driver_data) >= board_ahci_pcs7)
return;
/*
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 76d0f9de767b..58e09ffe8b9c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -4791,27 +4791,6 @@ void ata_scsi_hotplug(struct work_struct *work)
return;
}
- /*
- * XXX - UGLY HACK
- *
- * The block layer suspend/resume path is fundamentally broken due
- * to freezable kthreads and workqueue and may deadlock if a block
- * device gets removed while resume is in progress. I don't know
- * what the solution is short of removing freezable kthreads and
- * workqueues altogether.
- *
- * The following is an ugly hack to avoid kicking off device
- * removal while freezer is active. This is a joke but does avoid
- * this particular deadlock scenario.
- *
- * https://bugzilla.kernel.org/show_bug.cgi?id=62801
- * http://marc.info/?l=linux-kernel&m=138695698516487
- */
-#ifdef CONFIG_FREEZER
- while (pm_freezing)
- msleep(10);
-#endif
-
DPRINTK("ENTER\n");
mutex_lock(&ap->scsi_scan_mutex);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 2db62d98e395..7bd9cd366d41 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -9,6 +9,7 @@
*/
#include <linux/acpi.h>
+#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fwnode.h>
@@ -3179,6 +3180,8 @@ void device_shutdown(void)
wait_for_device_probe();
device_block_probing();
+ cpufreq_suspend();
+
spin_lock(&devices_kset->list_lock);
/*
* Walk the devices list backward, shutting down each in turn.
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 6bea4f3f8040..55907c27075b 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -540,6 +540,9 @@ static ssize_t soft_offline_page_store(struct device *dev,
pfn >>= PAGE_SHIFT;
if (!pfn_valid(pfn))
return -ENXIO;
+	/* Only online pages can be soft-offlined (in particular, not ZONE_DEVICE). */
+ if (!pfn_to_online_page(pfn))
+ return -EIO;
ret = soft_offline_page(pfn_to_page(pfn), 0);
return ret == 0 ? count : ret;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index b6c6c7d97d5b..b230beb6ccb4 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -241,12 +241,8 @@ struct resource *platform_get_resource_byname(struct platform_device *dev,
}
EXPORT_SYMBOL_GPL(platform_get_resource_byname);
-/**
- * platform_get_irq_byname - get an IRQ for a device by name
- * @dev: platform device
- * @name: IRQ name
- */
-int platform_get_irq_byname(struct platform_device *dev, const char *name)
+static int __platform_get_irq_byname(struct platform_device *dev,
+ const char *name)
{
struct resource *r;
@@ -262,12 +258,48 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
if (r)
return r->start;
- dev_err(&dev->dev, "IRQ %s not found\n", name);
return -ENXIO;
}
+
+/**
+ * platform_get_irq_byname - get an IRQ for a device by name
+ * @dev: platform device
+ * @name: IRQ name
+ *
+ * Get an IRQ like platform_get_irq(), but by name rather than by index.
+ *
+ * Return: IRQ number on success, negative error number on failure.
+ */
+int platform_get_irq_byname(struct platform_device *dev, const char *name)
+{
+ int ret;
+
+ ret = __platform_get_irq_byname(dev, name);
+ if (ret < 0 && ret != -EPROBE_DEFER)
+ dev_err(&dev->dev, "IRQ %s not found\n", name);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(platform_get_irq_byname);
/**
+ * platform_get_irq_byname_optional - get an optional IRQ for a device by name
+ * @dev: platform device
+ * @name: IRQ name
+ *
+ * Get an optional IRQ by name like platform_get_irq_byname(), except that
+ * it does not print an error message if the IRQ cannot be obtained.
+ *
+ * Return: IRQ number on success, negative error number on failure.
+ */
+int platform_get_irq_byname_optional(struct platform_device *dev,
+ const char *name)
+{
+ return __platform_get_irq_byname(dev, name);
+}
+EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
+
+/**
+ * platform_add_devices - add a number of platform devices
* @devs: array of platform devices to add
* @num: number of platform devices in array
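
The _optional variant lets drivers whose interrupt is genuinely optional probe without alarming dmesg noise. A hedged usage sketch (the "wakeup" IRQ name is made up):

	irq = platform_get_irq_byname_optional(pdev, "wakeup");
	if (irq == -EPROBE_DEFER)
		return irq;	/* still propagate probe deferral */
	if (irq < 0)
		irq = 0;	/* absent: continue without wakeup support */
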
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index ac07e8c94c79..478aa86fc1f2 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -248,8 +248,8 @@ static void nbd_put(struct nbd_device *nbd)
if (refcount_dec_and_mutex_lock(&nbd->refs,
&nbd_index_mutex)) {
idr_remove(&nbd_index_idr, nbd->index);
- mutex_unlock(&nbd_index_mutex);
nbd_dev_remove(nbd);
+ mutex_unlock(&nbd_index_mutex);
}
}
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index eabc116832a7..3d7fdea872f8 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -142,8 +142,7 @@ static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
zone->wp = zone->start;
break;
default:
- cmd->error = BLK_STS_NOTSUPP;
- break;
+ return BLK_STS_NOTSUPP;
}
return BLK_STS_OK;
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 7c4350c0fb77..39136675dae5 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -6639,10 +6639,13 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
- if (ret > 0)
+ if (ret > 0) {
ret = rbd_dev->acquire_err;
- else if (!ret)
- ret = -ETIMEDOUT;
+ } else {
+ cancel_delayed_work_sync(&rbd_dev->lock_dwork);
+ if (!ret)
+ ret = -ETIMEDOUT;
+ }
if (ret) {
rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index d58a359a6622..4285e75e52c3 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -413,13 +413,14 @@ static void reset_bdev(struct zram *zram)
static ssize_t backing_dev_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct file *file;
struct zram *zram = dev_to_zram(dev);
- struct file *file = zram->backing_dev;
char *p;
ssize_t ret;
down_read(&zram->init_lock);
- if (!zram->backing_dev) {
+ file = zram->backing_dev;
+ if (!file) {
memcpy(buf, "none\n", 5);
up_read(&zram->init_lock);
return 5;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index c52d6fa32aac..bffc11b87247 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2737,14 +2737,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
-/*
- * Stop cpufreq at shutdown to make sure it isn't holding any locks
- * or mutexes when secondary CPUs are halted.
- */
-static struct syscore_ops cpufreq_syscore_ops = {
- .shutdown = cpufreq_suspend,
-};
-
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);
@@ -2756,8 +2748,6 @@ static int __init cpufreq_core_init(void)
cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
BUG_ON(!cpufreq_global_kobject);
- register_syscore_ops(&cpufreq_syscore_ops);
-
return 0;
}
module_param(off, int, 0444);
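
Together with the drivers/base/core.c hunk earlier, this moves cpufreq teardown from syscore_shutdown() into device_shutdown(), which runs earlier in the shutdown sequence and in a context where taking sleeping locks is still safe. Approximate ordering on the reboot path (a from-memory sketch of kernel/reboot.c):

	kernel_restart_prepare(cmd);	/* -> device_shutdown(), now calls cpufreq_suspend() */
	migrate_to_reboot_cpu();
	syscore_shutdown();		/* where the old hook used to run */
	machine_restart(cmd);
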
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 433d91d710e4..d377b4ca66bf 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -45,10 +45,10 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
size_t ret = 0;
dmabuf = dentry->d_fsdata;
- mutex_lock(&dmabuf->lock);
+ dma_resv_lock(dmabuf->resv, NULL);
if (dmabuf->name)
ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
- mutex_unlock(&dmabuf->lock);
+ dma_resv_unlock(dmabuf->resv);
return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
dentry->d_name.name, ret > 0 ? name : "");
@@ -334,7 +334,7 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
if (IS_ERR(name))
return PTR_ERR(name);
- mutex_lock(&dmabuf->lock);
+ dma_resv_lock(dmabuf->resv, NULL);
if (!list_empty(&dmabuf->attachments)) {
ret = -EBUSY;
kfree(name);
@@ -344,7 +344,7 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
dmabuf->name = name;
out_unlock:
- mutex_unlock(&dmabuf->lock);
+ dma_resv_unlock(dmabuf->resv);
return ret;
}
@@ -403,10 +403,10 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
/* Don't count the temporary reference taken inside procfs seq_show */
seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
- mutex_lock(&dmabuf->lock);
+ dma_resv_lock(dmabuf->resv, NULL);
if (dmabuf->name)
seq_printf(m, "name:\t%s\n", dmabuf->name);
- mutex_unlock(&dmabuf->lock);
+ dma_resv_unlock(dmabuf->resv);
}
static const struct file_operations dma_buf_fops = {
@@ -525,6 +525,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
return ERR_PTR(-EINVAL);
}
+ if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
+ exp_info->ops->dynamic_mapping))
+ return ERR_PTR(-EINVAL);
+
if (!try_module_get(exp_info->owner))
return ERR_PTR(-ENOENT);
@@ -645,10 +649,11 @@ void dma_buf_put(struct dma_buf *dmabuf)
EXPORT_SYMBOL_GPL(dma_buf_put);
/**
- * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
+ * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally,
* calls attach() of dma_buf_ops to allow device-specific attach functionality
- * @dmabuf: [in] buffer to attach device to.
- * @dev: [in] device to be attached.
+ * @dmabuf: [in] buffer to attach device to.
+ * @dev: [in] device to be attached.
+ * @dynamic_mapping: [in] calling convention for map/unmap
*
* Returns struct dma_buf_attachment pointer for this attachment. Attachments
* must be cleaned up by calling dma_buf_detach().
@@ -662,8 +667,9 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
* accessible to @dev, and cannot be moved to a more suitable place. This is
* indicated with the error code -EBUSY.
*/
-struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
- struct device *dev)
+struct dma_buf_attachment *
+dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
+ bool dynamic_mapping)
{
struct dma_buf_attachment *attach;
int ret;
@@ -677,25 +683,69 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
attach->dev = dev;
attach->dmabuf = dmabuf;
-
- mutex_lock(&dmabuf->lock);
+ attach->dynamic_mapping = dynamic_mapping;
if (dmabuf->ops->attach) {
ret = dmabuf->ops->attach(dmabuf, attach);
if (ret)
goto err_attach;
}
+ dma_resv_lock(dmabuf->resv, NULL);
list_add(&attach->node, &dmabuf->attachments);
+ dma_resv_unlock(dmabuf->resv);
- mutex_unlock(&dmabuf->lock);
+ /* When either the importer or the exporter can't handle dynamic
+ * mappings we cache the mapping here to avoid issues with the
+ * reservation object lock.
+ */
+ if (dma_buf_attachment_is_dynamic(attach) !=
+ dma_buf_is_dynamic(dmabuf)) {
+ struct sg_table *sgt;
+
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_lock(attach->dmabuf->resv, NULL);
+
+ sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
+ if (!sgt)
+ sgt = ERR_PTR(-ENOMEM);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err_unlock;
+ }
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_unlock(attach->dmabuf->resv);
+ attach->sgt = sgt;
+ attach->dir = DMA_BIDIRECTIONAL;
+ }
return attach;
err_attach:
kfree(attach);
- mutex_unlock(&dmabuf->lock);
+ return ERR_PTR(ret);
+
+err_unlock:
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_unlock(attach->dmabuf->resv);
+
+ dma_buf_detach(dmabuf, attach);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
+
+/**
+ * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
+ * @dmabuf: [in] buffer to attach device to.
+ * @dev: [in] device to be attached.
+ *
+ * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
+ * mapping.
+ */
+struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+ struct device *dev)
+{
+ return dma_buf_dynamic_attach(dmabuf, dev, false);
+}
EXPORT_SYMBOL_GPL(dma_buf_attach);
/**
@@ -711,15 +761,22 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
if (WARN_ON(!dmabuf || !attach))
return;
- if (attach->sgt)
+ if (attach->sgt) {
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_lock(attach->dmabuf->resv, NULL);
+
dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
- mutex_lock(&dmabuf->lock);
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_unlock(attach->dmabuf->resv);
+ }
+
+ dma_resv_lock(dmabuf->resv, NULL);
list_del(&attach->node);
+ dma_resv_unlock(dmabuf->resv);
if (dmabuf->ops->detach)
dmabuf->ops->detach(dmabuf, attach);
- mutex_unlock(&dmabuf->lock);
kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);
@@ -749,6 +806,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
if (WARN_ON(!attach || !attach->dmabuf))
return ERR_PTR(-EINVAL);
+ if (dma_buf_attachment_is_dynamic(attach))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
if (attach->sgt) {
/*
* Two mappings with different directions for the same
@@ -761,6 +821,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
return attach->sgt;
}
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
@@ -793,9 +856,15 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
return;
+ if (dma_buf_attachment_is_dynamic(attach))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
if (attach->sgt == sg_table)
return;
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
@@ -1171,13 +1240,10 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
"size", "flags", "mode", "count", "ino");
list_for_each_entry(buf_obj, &db_list.head, list_node) {
- ret = mutex_lock_interruptible(&buf_obj->lock);
- if (ret) {
- seq_puts(s,
- "\tERROR locking buffer object: skipping\n");
- continue;
- }
+ ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
+ if (ret)
+ goto error_unlock;
seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
buf_obj->size,
@@ -1223,19 +1289,23 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
attach_count++;
}
+ dma_resv_unlock(buf_obj->resv);
seq_printf(s, "Total %d devices attached\n\n",
attach_count);
count++;
size += buf_obj->size;
- mutex_unlock(&buf_obj->lock);
}
seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
mutex_unlock(&db_list.lock);
return 0;
+
+error_unlock:
+ mutex_unlock(&db_list.lock);
+ return ret;
}
DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
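
For a dynamic importer the contract after this patch is: attach with dynamic_mapping set, then hold the reservation lock yourself around map/unmap so the exporter may relocate the buffer in between. A hedged sketch, with error handling elided and dmabuf/dev assumed to exist:

	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_dynamic_attach(dmabuf, dev, true);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	dma_resv_lock(dmabuf->resv, NULL);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (!IS_ERR(sgt)) {
		/* ... program the device from sgt ... */
		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	}
	dma_resv_unlock(dmabuf->resv);

	dma_buf_detach(dmabuf, attach);
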
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 35ed56b9c34f..1e21fc3e9851 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -408,7 +408,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
bytes = ~0ull;
else if (size & 0x8000)
bytes = (u64)(size & 0x7fff) << 10;
- else if (size != 0x7fff)
+ else if (size != 0x7fff || dm->length < 0x20)
bytes = (u64)size << 20;
else
bytes = (u64)get_unaligned((u32 *)&d[0x1C]) << 20;
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index addf0749dd8b..b1af0de2e100 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -381,7 +381,7 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx,
pcie->device_id.vendor_id, pcie->device_id.device_id);
p = pcie->device_id.class_code;
- printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]);
+ printk("%s""class_code: %02x%02x%02x\n", pfx, p[2], p[1], p[0]);
}
if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER)
printk("%s""serial number: 0x%04x, 0x%04x\n", pfx,
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 8d3e778e988b..69f00f7453a3 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -267,6 +267,9 @@ static __init int efivar_ssdt_load(void)
void *data;
int ret;
+ if (!efivar_ssdt[0])
+ return 0;
+
ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
list_for_each_entry_safe(entry, aux, &entries, list) {
diff --git a/drivers/firmware/efi/rci2-table.c b/drivers/firmware/efi/rci2-table.c
index 3e290f96620a..76b0c354a027 100644
--- a/drivers/firmware/efi/rci2-table.c
+++ b/drivers/firmware/efi/rci2-table.c
@@ -76,7 +76,7 @@ static u16 checksum(void)
return chksum;
}
-int __init efi_rci2_sysfs_init(void)
+static int __init efi_rci2_sysfs_init(void)
{
struct kobject *tables_kobj;
int ret = -ENOMEM;
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
index 1d3f5ca3eaaf..ebd7977653a8 100644
--- a/drivers/firmware/efi/tpm.c
+++ b/drivers/firmware/efi/tpm.c
@@ -40,7 +40,7 @@ int __init efi_tpm_eventlog_init(void)
{
struct linux_efi_tpm_eventlog *log_tbl;
struct efi_tcg2_final_events_table *final_tbl;
- unsigned int tbl_size;
+ int tbl_size;
int ret = 0;
if (efi.tpm_log == EFI_INVALID_TABLE_ADDR) {
@@ -75,16 +75,28 @@ int __init efi_tpm_eventlog_init(void)
goto out;
}
- tbl_size = tpm2_calc_event_log_size((void *)efi.tpm_final_log
- + sizeof(final_tbl->version)
- + sizeof(final_tbl->nr_events),
- final_tbl->nr_events,
- log_tbl->log);
+ tbl_size = 0;
+ if (final_tbl->nr_events != 0) {
+ void *events = (void *)efi.tpm_final_log
+ + sizeof(final_tbl->version)
+ + sizeof(final_tbl->nr_events);
+
+ tbl_size = tpm2_calc_event_log_size(events,
+ final_tbl->nr_events,
+ log_tbl->log);
+ }
+
+ if (tbl_size < 0) {
+ pr_err(FW_BUG "Failed to parse event in TPM Final Events Log\n");
+ goto out_calc;
+ }
+
memblock_reserve((unsigned long)final_tbl,
tbl_size + sizeof(*final_tbl));
- early_memunmap(final_tbl, sizeof(*final_tbl));
efi_tpm_final_log_size = tbl_size;
+out_calc:
+ early_memunmap(final_tbl, sizeof(*final_tbl));
out:
early_memunmap(log_tbl, sizeof(*log_tbl));
return ret;
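
Two things happen in the tpm.c hunk: tbl_size becomes signed, because tpm2_calc_event_log_size() returns a negative value when a firmware-supplied event fails to parse, and the early_memunmap() of the final-events table moves behind a shared out_calc label so both the error and success paths unmap it exactly once. With the old unsigned int, a parse failure silently became a huge memblock_reserve(). A tiny standalone illustration of the signedness trap (the helper here is a hypothetical stand-in):

    #include <stdio.h>

    /* hypothetical stand-in for a parser returning -1 on malformed input */
    static int calc_event_log_size(int malformed)
    {
            return malformed ? -1 : 4096;
    }

    int main(void)
    {
            unsigned int u = calc_event_log_size(1);  /* -1 wraps around */
            int s = calc_event_log_size(1);

            printf("unsigned result: %u  (error value lost)\n", u);
            if (s < 0)
                    printf("signed result: %d  (error detected)\n", s);
            return 0;
    }
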
diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c
index dda525c0f968..5c6f2a74f104 100644
--- a/drivers/firmware/google/vpd_decode.c
+++ b/drivers/firmware/google/vpd_decode.c
@@ -52,7 +52,7 @@ static int vpd_decode_entry(const u32 max_len, const u8 *input_buf,
if (max_len - consumed < *entry_len)
return VPD_FAIL;
- consumed += decoded_len;
+ consumed += *entry_len;
*_consumed = consumed;
return VPD_OK;
}
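
The one-line vpd_decode fix closes an out-of-bounds walk: decoded_len only counts the header bytes parsed so far, while *entry_len is the payload length that the preceding bounds check just validated, so the cursor must advance by the latter. A sketch of the corrected accounting, with the key/value header parsing elided:

    #include <stdint.h>

    /* Sketch: after the bounds check validates the payload, consume the
     * payload length, not just the header bytes already decoded. */
    static int vpd_decode_entry_sketch(uint32_t max_len,
                                       uint32_t *consumed,
                                       const uint32_t *entry_len)
    {
            /* ... length header already parsed: *consumed sits past the
             * header and *entry_len holds the payload length ... */
            if (max_len - *consumed < *entry_len)
                    return -1;               /* VPD_FAIL */
            *consumed += *entry_len;         /* was: += decoded_len */
            return 0;                        /* VPD_OK */
    }
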
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index fe7a73f52329..bb287f35cf40 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -530,11 +530,12 @@ static void sprd_eic_handle_one_type(struct gpio_chip *chip)
}
for_each_set_bit(n, &reg, SPRD_EIC_PER_BANK_NR) {
- girq = irq_find_mapping(chip->irq.domain,
- bank * SPRD_EIC_PER_BANK_NR + n);
+ u32 offset = bank * SPRD_EIC_PER_BANK_NR + n;
+
+ girq = irq_find_mapping(chip->irq.domain, offset);
generic_handle_irq(girq);
- sprd_eic_toggle_trigger(chip, girq, n);
+ sprd_eic_toggle_trigger(chip, girq, offset);
}
}
}
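
In the sprd-eic handler, both calls need the chip-global line number bank * SPRD_EIC_PER_BANK_NR + n: irq_find_mapping() already received it, but sprd_eic_toggle_trigger() was passed the raw bit index n, so edge re-arming hit the wrong line on every bank above the first. Hoisting the sum into a single offset variable keeps the two calls in agreement.
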
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index 4d835f9089df..86a10c808ef6 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -293,8 +293,9 @@ static void intel_mid_irq_handler(struct irq_desc *desc)
chip->irq_eoi(data);
}
-static void intel_mid_irq_init_hw(struct intel_mid_gpio *priv)
+static int intel_mid_irq_init_hw(struct gpio_chip *chip)
{
+ struct intel_mid_gpio *priv = gpiochip_get_data(chip);
void __iomem *reg;
unsigned base;
@@ -309,6 +310,8 @@ static void intel_mid_irq_init_hw(struct intel_mid_gpio *priv)
reg = gpio_reg(&priv->chip, base, GEDR);
writel(~0, reg);
}
+
+ return 0;
}
static int __maybe_unused intel_gpio_runtime_idle(struct device *dev)
@@ -372,6 +375,7 @@ static int intel_gpio_probe(struct pci_dev *pdev,
girq = &priv->chip.irq;
girq->chip = &intel_mid_irqchip;
+ girq->init_hw = intel_mid_irq_init_hw;
girq->parent_handler = intel_mid_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
@@ -384,9 +388,8 @@ static int intel_gpio_probe(struct pci_dev *pdev,
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_simple_irq;
- intel_mid_irq_init_hw(priv);
-
pci_set_drvdata(pdev, priv);
+
retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
if (retval) {
dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index 6bb9741ad036..e9e47c0d5be7 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -294,8 +294,9 @@ static struct irq_chip lp_irqchip = {
.flags = IRQCHIP_SKIP_SET_WAKE,
};
-static void lp_gpio_irq_init_hw(struct lp_gpio *lg)
+static int lp_gpio_irq_init_hw(struct gpio_chip *chip)
{
+ struct lp_gpio *lg = gpiochip_get_data(chip);
unsigned long reg;
unsigned base;
@@ -307,6 +308,8 @@ static void lp_gpio_irq_init_hw(struct lp_gpio *lg)
reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
outl(0xffffffff, reg);
}
+
+ return 0;
}
static int lp_gpio_probe(struct platform_device *pdev)
@@ -364,6 +367,7 @@ static int lp_gpio_probe(struct platform_device *pdev)
girq = &gc->irq;
girq->chip = &lp_irqchip;
+ girq->init_hw = lp_gpio_irq_init_hw;
girq->parent_handler = lp_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
@@ -373,9 +377,7 @@ static int lp_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
girq->parents[0] = (unsigned)irq_rc->start;
girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_simple_irq;
-
- lp_gpio_irq_init_hw(lg);
+ girq->handler = handle_bad_irq;
}
ret = devm_gpiochip_add_data(dev, gc, lg);
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index 47d05e357e61..faf86ea9c51a 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -192,13 +192,13 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio,
case 0:
val = MAX77620_CNFG_GPIO_DBNC_None;
break;
- case 1 ... 8:
+ case 1000 ... 8000:
val = MAX77620_CNFG_GPIO_DBNC_8ms;
break;
- case 9 ... 16:
+ case 9000 ... 16000:
val = MAX77620_CNFG_GPIO_DBNC_16ms;
break;
- case 17 ... 32:
+ case 17000 ... 32000:
val = MAX77620_CNFG_GPIO_DBNC_32ms;
break;
default:
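
gpiolib hands debounce periods to drivers in microseconds (the PIN_CONFIG_INPUT_DEBOUNCE argument), while this switch was written as if the argument were milliseconds, so any realistic request such as 10000 µs fell through to the error default. The relabelled cases map microsecond ranges onto the chip's coarse 8/16/32 ms buckets; as a plain function the mapping reads (a sketch, constants as in the driver):

    static int max77620_debounce_reg(unsigned int debounce_us)
    {
            switch (debounce_us) {
            case 0:
                    return MAX77620_CNFG_GPIO_DBNC_None;
            case 1000 ... 8000:
                    return MAX77620_CNFG_GPIO_DBNC_8ms;
            case 9000 ... 16000:
                    return MAX77620_CNFG_GPIO_DBNC_16ms;
            case 17000 ... 32000:
                    return MAX77620_CNFG_GPIO_DBNC_32ms;
            default:
                    /* gaps (e.g. 500 µs or 8500 µs) are unsupported */
                    return -EINVAL;
            }
    }
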
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 4f27ddfe1e2f..2f1e9da81c1e 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -362,8 +362,9 @@ static void mrfld_irq_handler(struct irq_desc *desc)
chained_irq_exit(irqchip, desc);
}
-static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
+static int mrfld_irq_init_hw(struct gpio_chip *chip)
{
+ struct mrfld_gpio *priv = gpiochip_get_data(chip);
void __iomem *reg;
unsigned int base;
@@ -375,6 +376,8 @@ static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
reg = gpio_reg(&priv->chip, base, GFER);
writel(0, reg);
}
+
+ return 0;
}
static const char *mrfld_gpio_get_pinctrl_dev_name(struct mrfld_gpio *priv)
@@ -447,6 +450,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
girq = &priv->chip.irq;
girq->chip = &mrfld_irqchip;
+ girq->init_hw = mrfld_irq_init_hw;
girq->parent_handler = mrfld_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
@@ -455,11 +459,10 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
if (!girq->parents)
return -ENOMEM;
girq->parents[0] = pdev->irq;
+ girq->first = irq_base;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
- mrfld_irq_init_hw(priv);
-
pci_set_drvdata(pdev, priv);
retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
if (retval) {
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 1eea2c6c2e1d..80ea49f570f4 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -317,7 +317,7 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
transitory = flags & OF_GPIO_TRANSITORY;
ret = gpiod_request(desc, label);
- if (ret == -EBUSY && (flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE))
+ if (ret == -EBUSY && (dflags & GPIOD_FLAGS_BIT_NONEXCLUSIVE))
return desc;
if (ret)
return ERR_PTR(ret);
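
gpiod_get_from_of_node() juggles two flag words: flags holds the OF_GPIO_* bits decoded from the device tree, while dflags holds the caller's GPIOD_* request flags, and GPIOD_FLAGS_BIT_NONEXCLUSIVE is defined in the latter namespace. Testing it against flags meant the non-exclusive escape hatch for already-requested lines never (or only coincidentally) matched, so legitimate shared-line users got -EBUSY.
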
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index bdbc1649eafa..104ed299d5ea 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -86,6 +86,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
struct lock_class_key *lock_key,
struct lock_class_key *request_key);
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
+static int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip);
static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip);
static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip);
@@ -1406,6 +1407,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
machine_gpiochip_add(chip);
+ ret = gpiochip_irqchip_init_hw(chip);
+ if (ret)
+ goto err_remove_acpi_chip;
+
ret = gpiochip_irqchip_init_valid_mask(chip);
if (ret)
goto err_remove_acpi_chip;
@@ -1622,6 +1627,16 @@ static struct gpio_chip *find_chip_by_name(const char *name)
* The following is irqchip helper code for gpiochips.
*/
+static int gpiochip_irqchip_init_hw(struct gpio_chip *gc)
+{
+ struct gpio_irq_chip *girq = &gc->irq;
+
+ if (!girq->init_hw)
+ return 0;
+
+ return girq->init_hw(gc);
+}
+
static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc)
{
struct gpio_irq_chip *girq = &gc->irq;
@@ -2446,8 +2461,13 @@ static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
{
return 0;
}
-
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {}
+
+static inline int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip)
+{
+ return 0;
+}
+
static inline int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip)
{
return 0;
@@ -3070,8 +3090,10 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
if (!ret)
goto set_output_value;
/* Emulate open drain by not actively driving the line high */
- if (value)
- return gpiod_direction_input(desc);
+ if (value) {
+ ret = gpiod_direction_input(desc);
+ goto set_output_flag;
+ }
}
else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
ret = gpio_set_config(gc, gpio_chip_hwgpio(desc),
@@ -3079,8 +3101,10 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
if (!ret)
goto set_output_value;
/* Emulate open source by not actively driving the line low */
- if (!value)
- return gpiod_direction_input(desc);
+ if (!value) {
+ ret = gpiod_direction_input(desc);
+ goto set_output_flag;
+ }
} else {
gpio_set_config(gc, gpio_chip_hwgpio(desc),
PIN_CONFIG_DRIVE_PUSH_PULL);
@@ -3088,6 +3112,17 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
set_output_value:
return gpiod_direction_output_raw_commit(desc, value);
+
+set_output_flag:
+ /*
+ * When emulating open-source or open-drain functionality by not
+ * actively driving the line (setting mode to input), we still need to
+ * set the IS_OUT flag; otherwise we won't be able to set the line
+ * value anymore.
+ */
+ if (ret == 0)
+ set_bit(FLAG_IS_OUT, &desc->flags);
+ return ret;
}
EXPORT_SYMBOL_GPL(gpiod_direction_output);
@@ -3448,8 +3483,6 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
if (value) {
ret = chip->direction_input(chip, offset);
- if (!ret)
- clear_bit(FLAG_IS_OUT, &desc->flags);
} else {
ret = chip->direction_output(chip, offset, 0);
if (!ret)
@@ -3479,8 +3512,6 @@ static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value
set_bit(FLAG_IS_OUT, &desc->flags);
} else {
ret = chip->direction_input(chip, offset);
- if (!ret)
- clear_bit(FLAG_IS_OUT, &desc->flags);
}
trace_gpio_direction(desc_to_gpio(desc), !value, ret);
if (ret < 0)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index caeef11976e8..617d9c3a86c3 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -93,6 +93,20 @@ config DRM_KMS_FB_HELPER
help
FBDEV helpers for KMS drivers.
+config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
+ bool "Enable refcount backtrace history in the DP MST helpers"
+ select STACKDEPOT
+ depends on DRM_KMS_HELPER
+ depends on DEBUG_KERNEL
+ depends on EXPERT
+ help
+ Enables debug tracing for topology refs in DRM's DP MST helpers. A
+ history of each topology reference/dereference will be printed to the
+ kernel log once a port or branch device's topology refcount reaches 0.
+
+ This has the potential to use a lot of memory and print some very
+ large kernel messages. If in doubt, say "N".
+
config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM
@@ -232,9 +246,9 @@ config DRM_AMDGPU
tristate "AMD GPU"
depends on DRM && PCI && MMU
select FW_LOADER
- select DRM_KMS_HELPER
+ select DRM_KMS_HELPER
select DRM_SCHED
- select DRM_TTM
+ select DRM_TTM
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
@@ -263,6 +277,7 @@ config DRM_VKMS
tristate "Virtual KMS (EXPERIMENTAL)"
depends on DRM
select DRM_KMS_HELPER
+ select CRC32
default n
help
Virtual Kernel Mode-Setting (VKMS) is used for testing or for
@@ -403,7 +418,7 @@ config DRM_R128
config DRM_I810
tristate "Intel I810"
- # !PREEMPT because of missing ioctl locking
+ # !PREEMPTION because of missing ioctl locking
depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN)
help
Choose this option if you have an Intel I810 graphics card. If M is
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 00962a659009..ca0e435559d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -53,8 +53,9 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
- amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
- amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o
+ amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
+ amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
+ amdgpu_umc.o smu_v11_0_i2c.o
amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
@@ -67,7 +68,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce
amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
- arct_reg_init.o navi12_reg_init.o
+ arct_reg_init.o navi12_reg_init.o mxgpu_nv.o
# add DF block
amdgpu-y += \
@@ -83,7 +84,7 @@ amdgpu-y += \
# add UMC block
amdgpu-y += \
- umc_v6_1.o
+ umc_v6_1.o umc_v6_0.o
# add IH block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index bd37df5dd6d0..4a2b413c8a25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -73,6 +73,7 @@
#include "amdgpu_gmc.h"
#include "amdgpu_gfx.h"
#include "amdgpu_sdma.h"
+#include "amdgpu_nbio.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_csa.h"
@@ -106,6 +107,8 @@ struct amdgpu_mgpu_info
uint32_t num_apu;
};
+#define AMDGPU_MAX_TIMEOUT_PARAM_LENTH 256
+
/*
* Modules parameters.
*/
@@ -122,6 +125,7 @@ extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
+extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENTH];
extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
@@ -146,11 +150,7 @@ extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern uint amdgpu_pp_feature_mask;
-extern int amdgpu_ngg;
-extern int amdgpu_prim_buf_per_se;
-extern int amdgpu_pos_buf_per_se;
-extern int amdgpu_cntl_sb_buf_per_se;
-extern int amdgpu_param_buf_per_se;
+extern uint amdgpu_force_long_training;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
@@ -167,6 +167,12 @@ extern int amdgpu_mcbp;
extern int amdgpu_discovery;
extern int amdgpu_mes;
extern int amdgpu_noretry;
+extern int amdgpu_force_asic_type;
+#ifdef CONFIG_HSA_AMD
+extern int sched_policy;
+#else
+static const int sched_policy = KFD_SCHED_POLICY_HWS;
+#endif
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@@ -283,6 +289,9 @@ struct amdgpu_ip_block_version {
const struct amd_ip_funcs *funcs;
};
+#define HW_REV(_Major, _Minor, _Rev) \
+ ((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev)))
+
struct amdgpu_ip_block {
struct amdgpu_ip_block_status status;
const struct amdgpu_ip_block_version *version;
@@ -425,7 +434,6 @@ struct amdgpu_fpriv {
};
int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
-int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev);
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
@@ -477,7 +485,6 @@ struct amdgpu_cs_parser {
uint64_t bytes_moved_vis_threshold;
uint64_t bytes_moved;
uint64_t bytes_moved_vis;
- struct amdgpu_bo_list_entry *evictable;
/* user fence */
struct amdgpu_bo_list_entry uf_entry;
@@ -624,6 +631,11 @@ struct amdgpu_fw_vram_usage {
u64 size;
struct amdgpu_bo *reserved_bo;
void *va;
+
+ /* Offset at the top of VRAM, used as a c2p write buffer. */
+ u64 mem_train_fb_loc;
+ bool mem_train_support;
};
/*
@@ -644,71 +656,14 @@ typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
-
-/*
- * amdgpu nbio functions
- *
- */
-struct nbio_hdp_flush_reg {
- u32 ref_and_mask_cp0;
- u32 ref_and_mask_cp1;
- u32 ref_and_mask_cp2;
- u32 ref_and_mask_cp3;
- u32 ref_and_mask_cp4;
- u32 ref_and_mask_cp5;
- u32 ref_and_mask_cp6;
- u32 ref_and_mask_cp7;
- u32 ref_and_mask_cp8;
- u32 ref_and_mask_cp9;
- u32 ref_and_mask_sdma0;
- u32 ref_and_mask_sdma1;
- u32 ref_and_mask_sdma2;
- u32 ref_and_mask_sdma3;
- u32 ref_and_mask_sdma4;
- u32 ref_and_mask_sdma5;
- u32 ref_and_mask_sdma6;
- u32 ref_and_mask_sdma7;
-};
-
struct amdgpu_mmio_remap {
u32 reg_offset;
resource_size_t bus_addr;
};
-struct amdgpu_nbio_funcs {
- const struct nbio_hdp_flush_reg *hdp_flush_reg;
- u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
- u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
- u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
- u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
- u32 (*get_rev_id)(struct amdgpu_device *adev);
- void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
- void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
- u32 (*get_memsize)(struct amdgpu_device *adev);
- void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
- bool use_doorbell, int doorbell_index, int doorbell_size);
- void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell,
- int doorbell_index, int instance);
- void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
- bool enable);
- void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
- bool enable);
- void (*ih_doorbell_range)(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index);
- void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
- bool enable);
- void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
- bool enable);
- void (*get_clockgating_state)(struct amdgpu_device *adev,
- u32 *flags);
- void (*ih_control)(struct amdgpu_device *adev);
- void (*init_registers)(struct amdgpu_device *adev);
- void (*detect_hw_virt)(struct amdgpu_device *adev);
- void (*remap_hdp_registers)(struct amdgpu_device *adev);
-};
-
struct amdgpu_df_funcs {
void (*sw_init)(struct amdgpu_device *adev);
+ void (*sw_fini)(struct amdgpu_device *adev);
void (*enable_broadcast_mode)(struct amdgpu_device *adev,
bool enable);
u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
@@ -813,6 +768,7 @@ struct amdgpu_device {
uint8_t *bios;
uint32_t bios_size;
struct amdgpu_bo *stolen_vga_memory;
+ struct amdgpu_bo *discovery_memory;
uint32_t bios_scratch_reg_offset;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -921,6 +877,12 @@ struct amdgpu_device {
u32 cg_flags;
u32 pg_flags;
+ /* nbio */
+ struct amdgpu_nbio nbio;
+
+ /* mmhub */
+ struct amdgpu_mmhub mmhub;
+
/* gfx */
struct amdgpu_gfx gfx;
@@ -974,9 +936,7 @@ struct amdgpu_device {
/* soc15 register offset based on ip, instance and segment */
uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
- const struct amdgpu_nbio_funcs *nbio_funcs;
const struct amdgpu_df_funcs *df_funcs;
- const struct amdgpu_mmhub_funcs *mmhub_funcs;
/* delayed work_func for deferring clockgating during resume */
struct delayed_work delayed_init_work;
@@ -1009,8 +969,6 @@ struct amdgpu_device {
int asic_reset_res;
struct work_struct xgmi_reset_work;
- bool in_baco_reset;
-
long gfx_timeout;
long sdma_timeout;
long video_timeout;
@@ -1032,6 +990,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
+void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
+ uint32_t *buf, size_t size, bool write);
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
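
Among the amdgpu.h additions, HW_REV() packs an IP block's major/minor/revision into a single u32 (major in bits 31:16, minor in 15:8, revision in 7:0), so version checks reduce to integer comparisons. For instance (values illustrative):

    uint32_t rev = HW_REV(11, 0, 2);        /* 0x000b0002 */

    if (rev >= HW_REV(11, 0, 0) && rev < HW_REV(12, 0, 0))
            ; /* apply an 11.x-specific code path */
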
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 07eb29885372..8c531793fe17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -63,45 +63,10 @@ void amdgpu_amdkfd_fini(void)
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
- const struct kfd2kgd_calls *kfd2kgd;
-
- switch (adev->asic_type) {
-#ifdef CONFIG_DRM_AMDGPU_CIK
- case CHIP_KAVERI:
- case CHIP_HAWAII:
- kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
- break;
-#endif
- case CHIP_CARRIZO:
- case CHIP_TONGA:
- case CHIP_FIJI:
- case CHIP_POLARIS10:
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- case CHIP_VEGAM:
- kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
- break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
- break;
- case CHIP_ARCTURUS:
- kfd2kgd = amdgpu_amdkfd_arcturus_get_functions();
- break;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- kfd2kgd = amdgpu_amdkfd_gfx_10_0_get_functions();
- break;
- default:
- dev_info(adev->dev, "kfd not supported on this ASIC\n");
- return;
- }
+ bool vf = amdgpu_sriov_vf(adev);
adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
- adev->pdev, kfd2kgd);
+ adev->pdev, adev->asic_type, vf);
if (adev->kfd.dev)
amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
@@ -202,7 +167,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->doorbell_index.last_non_cp;
}
- kgd2kfd_device_init(adev->kfd.dev, &gpu_resources);
+ kgd2kfd_device_init(adev->kfd.dev, adev->ddev, &gpu_resources);
}
}
@@ -709,38 +674,14 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
return 0;
}
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
-{
- return NULL;
-}
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
-{
- return NULL;
-}
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
-{
- return NULL;
-}
-
-struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void)
-{
- return NULL;
-}
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void)
-{
- return NULL;
-}
-
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
- const struct kfd2kgd_calls *f2g)
+ unsigned int asic_type, bool vf)
{
return NULL;
}
bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources)
{
return false;
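
This inverts the old registration flow: rather than amdgpu picking a kfd2kgd_calls table per ASIC (and carrying the five CONFIG-dependent getter stubs deleted above), kgd2kfd_probe() now receives just the asic_type plus an SR-IOV-VF flag and the KFD side selects the function table itself. kgd2kfd_device_init() additionally gains the drm_device pointer.
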
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index e519df3fd2b6..069d5d230810 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -57,7 +57,7 @@ struct kgd_mem {
unsigned int mapped_to_gpu_memory;
uint64_t va;
- uint32_t mapping_flags;
+ uint32_t alloc_flags;
atomic_t invalid;
struct amdkfd_process_info *process_info;
@@ -137,12 +137,6 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void);
-struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void);
-
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);
@@ -179,10 +173,17 @@ uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
+/* Read user wptr from a specified user address space with page fault
+ * disabled. The memory must be pinned and mapped to the hardware when
+ * this is called in hqd_load functions, so it should never fault in
+ * the first place. This resolves a circular lock dependency involving
+ * four locks, including the DQM lock and mmap_sem.
+ */
#define read_user_wptr(mmptr, wptr, dst) \
({ \
bool valid = false; \
if ((mmptr) && (wptr)) { \
+ pagefault_disable(); \
if ((mmptr) == current->mm) { \
valid = !get_user((dst), (wptr)); \
} else if (current->mm == NULL) { \
@@ -190,6 +191,7 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
valid = !get_user((dst), (wptr)); \
unuse_mm(mmptr); \
} \
+ pagefault_enable(); \
} \
valid; \
})
@@ -240,8 +242,9 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
- const struct kfd2kgd_calls *f2g);
+ unsigned int asic_type, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd);
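
The pagefault_disable()/pagefault_enable() pair added inside read_user_wptr() makes get_user() fail immediately instead of sleeping to service a fault, which is safe here because the queue's wptr page is pinned while the HQD is loaded; the macro's result simply reports whether the copy succeeded. The core pattern in isolation (a sketch; write_hw_wptr is a hypothetical consumer):

    u64 dst;
    bool valid;

    pagefault_disable();
    /* returns nonzero instead of faulting while pagefaults are off */
    valid = !get_user(dst, wptr);
    pagefault_enable();

    if (valid)
            write_hw_wptr(dst);
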
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index c79aaebeeaf0..e1fbbebce4fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -69,11 +69,11 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct v9_sdma_mqd *)mqd;
}
-static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
- uint32_t base[8] = {
+ uint32_t sdma_engine_reg_base[8] = {
SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
SOC15_REG_OFFSET(SDMA1, 0,
@@ -91,111 +91,82 @@ static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
SOC15_REG_OFFSET(SDMA7, 0,
mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL
};
- uint32_t retval;
- retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
- mmSDMA0_RLC0_RB_CNTL);
+ uint32_t retval = sdma_engine_reg_base[engine_id]
+ + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
- pr_debug("sdma base address: 0x%x\n", retval);
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, retval);
return retval;
}
-static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
- u32 instance, u32 offset)
-{
- switch (instance) {
- case 0:
- return (adev->reg_offset[SDMA0_HWIP][0][0] + offset);
- case 1:
- return (adev->reg_offset[SDMA1_HWIP][0][1] + offset);
- case 2:
- return (adev->reg_offset[SDMA2_HWIP][0][1] + offset);
- case 3:
- return (adev->reg_offset[SDMA3_HWIP][0][1] + offset);
- case 4:
- return (adev->reg_offset[SDMA4_HWIP][0][1] + offset);
- case 5:
- return (adev->reg_offset[SDMA5_HWIP][0][1] + offset);
- case 6:
- return (adev->reg_offset[SDMA6_HWIP][0][1] + offset);
- case 7:
- return (adev->reg_offset[SDMA7_HWIP][0][1] + offset);
- default:
- break;
- }
- return 0;
-}
-
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
+ uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
uint32_t data;
uint64_t data64;
uint64_t __user *wptr64 = (uint64_t __user *)wptr;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdmax_gfx_context_cntl = sdma_v4_0_get_reg_offset(adev,
- m->sdma_engine_id, mmSDMA0_GFX_CONTEXT_CNTL);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- data = RREG32(sdmax_gfx_context_cntl);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(sdmax_gfx_context_cntl, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
m->sdmax_rlcx_doorbell_offset);
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
if (read_user_wptr(mm, wptr64, data64)) {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
lower_32_bits(data64));
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
upper_32_bits(data64));
} else {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
@@ -205,7 +176,8 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
uint32_t (**dump)[2], uint32_t *n_regs)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
@@ -215,15 +187,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
return -ENOMEM;
for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -235,14 +207,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -255,40 +227,42 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
m->sdmax_rlcx_rb_rptr_hi =
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
return 0;
}
-static const struct kfd2kgd_calls kfd2kgd = {
+const struct kfd2kgd_calls arcturus_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
.init_interrupts = kgd_gfx_v9_init_interrupts,
@@ -304,20 +278,11 @@ static const struct kfd2kgd_calls kfd2kgd = {
.address_watch_execute = kgd_gfx_v9_address_watch_execute,
.wave_control_execute = kgd_gfx_v9_wave_control_execute,
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid =
- kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid =
- kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid,
- .set_scratch_backing_va = kgd_gfx_v9_set_scratch_backing_va,
+ .get_atc_vmid_pasid_mapping_info =
+ kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.get_tile_config = kgd_gfx_v9_get_tile_config,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
.invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
.invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
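
The rename from get_sdma_base_addr() to get_sdma_rlc_reg_offset() reflects what the value actually is: a register-index delta, not an address. Each engine contributes the distance between its own RLC0_RB_CNTL and the SDMA0 alias, each queue adds the RLC-to-RLC stride, and every per-queue access then goes through the same offset. In outline:

    /* per-queue register index, as computed above */
    uint32_t off = sdma_engine_reg_base[engine_id]
                 + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);

    /* all RLC registers of that queue are then reached the same way */
    WREG32(off + mmSDMA0_RLC0_RB_CNTL, cntl);
    WREG32(off + mmSDMA0_RLC0_DOORBELL, db);
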
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index d10f483f5e27..0878f59ec340 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -22,15 +22,9 @@
#undef pr_fmt
#define pr_fmt(fmt) "kfd2kgd: " fmt
-#include <linux/module.h>
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
-#include <linux/firmware.h>
#include <linux/mmu_context.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
-#include "amdgpu_ucode.h"
-#include "soc15_hw_ip.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "navi10_enum.h"
@@ -42,6 +36,7 @@
#include "v10_structs.h"
#include "nv.h"
#include "nvd.h"
+#include "gfxhub_v2_0.h"
enum hqd_dequeue_request_type {
NO_ACTION = 0,
@@ -50,63 +45,6 @@ enum hqd_dequeue_request_type {
SAVE_WAVES
};
-/*
- * Register access functions
- */
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config,
- uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases);
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid);
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm);
-static int kgd_hqd_dump(struct kgd_dev *kgd,
- uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
- uint32_t __user *wptr, struct mm_struct *mm);
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
- uint32_t engine_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
- enum kfd_preempt_type reset_type,
- unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id);
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int utimeout);
-#if 0
-static uint32_t get_watch_base_addr(struct amdgpu_device *adev);
-#endif
-static int kgd_address_watch_disable(struct kgd_dev *kgd);
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd);
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base);
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
-
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
*/
@@ -139,37 +77,6 @@ static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
return 0;
}
-static const struct kfd2kgd_calls kfd2kgd = {
- .program_sh_mem_settings = kgd_program_sh_mem_settings,
- .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_interrupts = kgd_init_interrupts,
- .hqd_load = kgd_hqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
- .hqd_dump = kgd_hqd_dump,
- .hqd_sdma_dump = kgd_hqd_sdma_dump,
- .hqd_is_occupied = kgd_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
- .hqd_destroy = kgd_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
- .wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid =
- get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid =
- get_atc_vmid_pasid_mapping_valid,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
- .set_vm_context_page_table_base = set_vm_context_page_table_base,
- .get_tile_config = amdgpu_amdkfd_get_tile_config,
-};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions()
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -250,11 +157,6 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
ATC_VMID0_PASID_MAPPING__VALID_MASK;
pr_debug("pasid 0x%x vmid %d, reg value %x\n", pasid, vmid, pasid_mapping);
- /*
- * need to do this twice, once for gfx and once for mmhub
- * for ATC add 16 to VMID for mmhub, for IH different registers.
- * ATC_VMID0..15 registers are separate from ATC_VMID16..31.
- */
pr_debug("ATHUB, reg %x\n", SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid);
WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
@@ -306,11 +208,11 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
- uint32_t base[2] = {
+ uint32_t sdma_engine_reg_base[2] = {
SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
/* On gfx10, mmSDMA1_xxx registers are defined NOT based
@@ -322,12 +224,12 @@ static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
SOC15_REG_OFFSET(SDMA1, 0,
mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL
};
- uint32_t retval;
- retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
- mmSDMA0_RLC0_RB_CNTL);
+ uint32_t retval = sdma_engine_reg_base[engine_id]
+ + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
- pr_debug("sdma base address: 0x%x\n", retval);
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, retval);
return retval;
}
@@ -488,72 +390,67 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
- uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
+ uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
uint32_t data;
uint64_t data64;
uint64_t __user *wptr64 = (uint64_t __user *)wptr;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- pr_debug("sdma load base addr %x for engine %d, queue %d\n", sdma_base_addr, m->sdma_engine_id, m->sdma_queue_id);
- sdmax_gfx_context_cntl = m->sdma_engine_id ?
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) :
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- data = RREG32(sdmax_gfx_context_cntl);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(sdmax_gfx_context_cntl, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
m->sdmax_rlcx_doorbell_offset);
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
if (read_user_wptr(mm, wptr64, data64)) {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
lower_32_bits(data64));
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
upper_32_bits(data64));
} else {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
@@ -563,28 +460,26 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
uint32_t (**dump)[2], uint32_t *n_regs)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
- pr_debug("sdma dump engine id %d queue_id %d\n", engine_id, queue_id);
- pr_debug("sdma base addr %x\n", sdma_base_addr);
-
*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -618,14 +513,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -746,59 +641,52 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
m->sdmax_rlcx_rb_rptr_hi =
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
return 0;
}
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
+static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid)
{
- uint32_t reg;
+ uint32_t value;
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
-
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
- reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
- + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
@@ -830,6 +718,8 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
int vmid;
+ uint16_t queried_pasid;
+ bool ret;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
if (amdgpu_emu_mode == 0 && ring->sched.ready)
@@ -838,13 +728,13 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
for (vmid = 0; vmid < 16; vmid++) {
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
continue;
- if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
- if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
- == pasid) {
- amdgpu_gmc_flush_gpu_tlb(adev, vmid,
- AMDGPU_GFXHUB_0, 0);
- break;
- }
+
+ ret = get_atc_vmid_pasid_mapping_info(kgd, vmid,
+ &queried_pasid);
+ if (ret && queried_pasid == pasid) {
+ amdgpu_gmc_flush_gpu_tlb(adev, vmid,
+ AMDGPU_GFXHUB_0, 0);
+ break;
}
}
@@ -914,7 +804,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint64_t base = page_table_base | AMDGPU_PTE_VALID;
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID %u\n",
@@ -922,18 +811,31 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
return;
}
- /* TODO: take advantage of per-process address space size. For
- * now, all processes share the same address space size, like
- * on GFX8 and older.
- */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
- lower_32_bits(adev->vm_manager.max_pfn - 1));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
- upper_32_bits(adev->vm_manager.max_pfn - 1));
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
+ /* SDMA is on gfxhub as well for Navi1* series */
+ gfxhub_v2_0_setup_vm_pt_regs(adev, vmid, page_table_base);
}
+
+const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_address_watch_disable,
+ .address_watch_execute = kgd_address_watch_execute,
+ .wave_control_execute = kgd_wave_control_execute,
+ .address_watch_get_offset = kgd_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_info =
+ get_atc_vmid_pasid_mapping_info,
+ .get_tile_config = amdgpu_amdkfd_get_tile_config,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .invalidate_tlbs = invalidate_tlbs,
+ .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
+ .get_hive_id = amdgpu_amdkfd_get_hive_id,
+};
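
get_atc_vmid_pasid_mapping_valid() and get_atc_vmid_pasid_mapping_pasid() both read the same mmATC_VMID0_PASID_MAPPING register, so the gfx_v10 table (and arcturus above) fold them into one get_atc_vmid_pasid_mapping_info() that returns the valid bit and fills in the PASID from a single read. Each caller in the TLB-invalidation loop now does one MMIO read per VMID instead of two:

    uint16_t queried_pasid;
    bool valid;

    valid = get_atc_vmid_pasid_mapping_info(kgd, vmid, &queried_pasid);
    if (valid && queried_pasid == pasid)
            amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
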
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 5f459bf5f622..6e6f0a99ec06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -20,8 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
#include <linux/mmu_context.h>
#include "amdgpu.h"
@@ -86,65 +84,6 @@ union TCP_WATCH_CNTL_BITS {
float f32All;
};
-/*
- * Register access functions
- */
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
- uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
-
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid);
-
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm);
-static int kgd_hqd_dump(struct kgd_dev *kgd,
- uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
- uint32_t __user *wptr, struct mm_struct *mm);
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
- uint32_t engine_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
- enum kfd_preempt_type reset_type,
- unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id);
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int utimeout);
-static int kgd_address_watch_disable(struct kgd_dev *kgd);
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd);
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
-
-static void set_scratch_backing_va(struct kgd_dev *kgd,
- uint64_t va, uint32_t vmid);
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base);
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
-static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd);
-
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
*/
@@ -170,37 +109,6 @@ static int get_tile_config(struct kgd_dev *kgd,
return 0;
}
-static const struct kfd2kgd_calls kfd2kgd = {
- .program_sh_mem_settings = kgd_program_sh_mem_settings,
- .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_interrupts = kgd_init_interrupts,
- .hqd_load = kgd_hqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
- .hqd_dump = kgd_hqd_dump,
- .hqd_sdma_dump = kgd_hqd_sdma_dump,
- .hqd_is_occupied = kgd_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
- .hqd_destroy = kgd_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
- .wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
- .set_scratch_backing_va = set_scratch_backing_va,
- .get_tile_config = get_tile_config,
- .set_vm_context_page_table_base = set_vm_context_page_table_base,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
- .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
-};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -303,14 +211,15 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+static inline uint32_t get_sdma_rlc_reg_offset(struct cik_sdma_rlc_registers *m)
{
uint32_t retval;
retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
- pr_debug("sdma base address: 0x%x\n", retval);
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
+ m->sdma_engine_id, m->sdma_queue_id, retval);
return retval;
}
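
The rename makes explicit that the helper returns a register offset rather than an address, and the debug print now identifies the engine and queue it resolved. A minimal standalone model of the arithmetic follows; the stride constants are illustrative stand-ins for SDMA1_REGISTER_OFFSET and KFD_CIK_SDMA_QUEUE_OFFSET, not the real CIK values.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for SDMA1_REGISTER_OFFSET and
 * KFD_CIK_SDMA_QUEUE_OFFSET; the real values live in the ASIC headers. */
#define ENGINE_STRIDE 0x200u
#define QUEUE_STRIDE  0x40u

static uint32_t sdma_rlc_reg_offset(uint32_t engine_id, uint32_t queue_id)
{
	/* Same shape as the kernel helper: engine block + queue stride. */
	return engine_id * ENGINE_STRIDE + queue_id * QUEUE_STRIDE;
}

int main(void)
{
	for (uint32_t e = 0; e < 2; e++)
		for (uint32_t q = 0; q < 2; q++)
			printf("SDMA%u RLC%u -> 0x%x\n", e, q,
			       sdma_rlc_reg_offset(e, q));
	return 0;
}
```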
@@ -413,60 +322,52 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
unsigned long end_jiffies;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t data;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- if (m->sdma_engine_id) {
- data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
- data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
- } else {
- data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
- }
data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdma_rlc_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdma_rlc_rb_rptr);
if (read_user_wptr(mm, wptr, data))
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
else
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdma_rlc_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
m->sdma_rlc_virtual_addr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdma_rlc_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdma_rlc_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdma_rlc_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
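
Two behavioural notes on this hunk: the idle wait now logs before returning -ETIME, and the RESUME_CTX programming of the GFX context-control register is dropped from the RLC queue load path. Below is a minimal userspace sketch of the poll-until-idle-with-deadline shape; read_status and the tick budget are stand-ins for the RREG32 poll and the jiffies deadline.

```c
#include <stdio.h>

/* Hypothetical model of the "wait for SDMA RLC idle" loop: poll a
 * status source until the IDLE bit is seen or a deadline expires. */
#define IDLE_MASK (1u << 0)

static unsigned int read_status(int tick)
{
	return tick >= 3 ? IDLE_MASK : 0; /* pretend HW goes idle on tick 3 */
}

static int wait_for_idle(int timeout_ticks)
{
	for (int tick = 0; tick < timeout_ticks; tick++) {
		if (read_status(tick) & IDLE_MASK)
			return 0;
		/* the kernel sleeps here via usleep_range(500, 1000) */
	}
	fprintf(stderr, "SDMA RLC not idle in %s\n", __func__);
	return -1; /* the kernel returns -ETIME */
}

int main(void)
{
	printf("wait_for_idle -> %d\n", wait_for_idle(10));
	return 0;
}
```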
@@ -524,13 +425,13 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -645,32 +546,34 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdma_rlc_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdma_rlc_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
return 0;
}
@@ -758,24 +661,16 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
}
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
+static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid)
{
- uint32_t reg;
+ uint32_t value;
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
-
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static void set_scratch_backing_va(struct kgd_dev *kgd,
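
The two single-purpose getters each read mmATC_VMID0_PASID_MAPPING; the replacement reads it once, returns the VALID bit, and hands back the PASID through an out-parameter. A compilable sketch of the combined-read shape follows; the register layout and masks are illustrative, not the real ATC encoding.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout; real masks come from the ASIC headers. */
#define PASID_MASK 0xFFFFu
#define VALID_MASK (1u << 31)

/* Stand-in for RREG32(mmATC_VMID0_PASID_MAPPING + vmid). */
static uint32_t read_mapping_reg(uint8_t vmid)
{
	return vmid == 5 ? (VALID_MASK | 0x1234u) : 0;
}

/* One register read yields both pieces of information. */
static bool get_mapping_info(uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value = read_mapping_reg(vmid);

	*p_pasid = value & PASID_MASK;
	return !!(value & VALID_MASK);
}

int main(void)
{
	uint16_t pasid;

	for (uint8_t vmid = 0; vmid < 8; vmid++)
		if (get_mapping_info(vmid, &pasid))
			printf("vmid %u maps pasid 0x%x\n", vmid, pasid);
	return 0;
}
```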
@@ -855,3 +750,28 @@ static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd)
return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
}
+
+const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_address_watch_disable,
+ .address_watch_execute = kgd_address_watch_execute,
+ .wave_control_execute = kgd_wave_control_execute,
+ .address_watch_get_offset = kgd_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
+ .set_scratch_backing_va = set_scratch_backing_va,
+ .get_tile_config = get_tile_config,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .invalidate_tlbs = invalidate_tlbs,
+ .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
+ .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
+};
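
Across all three ASIC files the pattern is the same: the file-local kfd2kgd table and its amdgpu_amdkfd_gfx_*_get_functions() accessor are replaced by an exported const table (gfx_v7_kfd2kgd here) that the common code can reference at link time. A minimal model of that before/after linkage, with illustrative names:

```c
#include <stdio.h>

/* Minimal model of the refactor: each backend used to hide a static
 * table behind an accessor such as amdgpu_amdkfd_gfx_7_get_functions();
 * now it exports a named const table instead. Names are illustrative. */
struct ops {
	void (*hello)(void);
};

static void v7_hello(void)
{
	puts("gfx v7 backend");
}

/* Exported, const, link-time visible: no accessor function needed. */
const struct ops gfx_v7_ops = {
	.hello = v7_hello,
};

int main(void)
{
	const struct ops *ops = &gfx_v7_ops; /* was: ...get_functions() */

	ops->hello();
	return 0;
}
```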
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 6d2f61449606..bfbddedb2380 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -20,9 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/module.h>
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
#include <linux/mmu_context.h>
#include "amdgpu.h"
@@ -44,62 +41,6 @@ enum hqd_dequeue_request_type {
RESET_WAVES
};
-/*
- * Register access functions
- */
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config,
- uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases);
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid);
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm);
-static int kgd_hqd_dump(struct kgd_dev *kgd,
- uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
- uint32_t __user *wptr, struct mm_struct *mm);
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
- uint32_t engine_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
- enum kfd_preempt_type reset_type,
- unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id);
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int utimeout);
-static int kgd_address_watch_disable(struct kgd_dev *kgd);
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd);
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
-static void set_scratch_backing_va(struct kgd_dev *kgd,
- uint64_t va, uint32_t vmid);
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base);
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
-
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
*/
@@ -125,38 +66,6 @@ static int get_tile_config(struct kgd_dev *kgd,
return 0;
}
-static const struct kfd2kgd_calls kfd2kgd = {
- .program_sh_mem_settings = kgd_program_sh_mem_settings,
- .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_interrupts = kgd_init_interrupts,
- .hqd_load = kgd_hqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
- .hqd_dump = kgd_hqd_dump,
- .hqd_sdma_dump = kgd_hqd_sdma_dump,
- .hqd_is_occupied = kgd_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
- .hqd_destroy = kgd_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
- .wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid =
- get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid =
- get_atc_vmid_pasid_mapping_valid,
- .set_scratch_backing_va = set_scratch_backing_va,
- .get_tile_config = get_tile_config,
- .set_vm_context_page_table_base = set_vm_context_page_table_base,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
-};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -260,13 +169,15 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static inline uint32_t get_sdma_base_addr(struct vi_sdma_mqd *m)
+static inline uint32_t get_sdma_rlc_reg_offset(struct vi_sdma_mqd *m)
{
uint32_t retval;
retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
- pr_debug("sdma base address: 0x%x\n", retval);
+
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
+ m->sdma_engine_id, m->sdma_queue_id, retval);
return retval;
}
@@ -398,59 +309,51 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
unsigned long end_jiffies;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t data;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- if (m->sdma_engine_id) {
- data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
- data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
- } else {
- data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
- }
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
if (read_user_wptr(mm, wptr, data))
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
else
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
m->sdmax_rlcx_virtual_addr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
@@ -517,13 +420,13 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -641,54 +544,48 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
return 0;
}
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
+static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid)
{
- uint32_t reg;
+ uint32_t value;
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
+ value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static int kgd_address_watch_disable(struct kgd_dev *kgd)
@@ -798,3 +695,28 @@ static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
RREG32(mmVM_INVALIDATE_RESPONSE);
return 0;
}
+
+const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_address_watch_disable,
+ .address_watch_execute = kgd_address_watch_execute,
+ .wave_control_execute = kgd_wave_control_execute,
+ .address_watch_get_offset = kgd_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_info =
+ get_atc_vmid_pasid_mapping_info,
+ .set_scratch_backing_va = set_scratch_backing_va,
+ .get_tile_config = get_tile_config,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .invalidate_tlbs = invalidate_tlbs,
+ .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index e262f2ac07a3..c72246f2c08a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -22,14 +22,10 @@
#define pr_fmt(fmt) "kfd2kgd: " fmt
-#include <linux/module.h>
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
#include <linux/mmu_context.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
-#include "soc15_hw_ip.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
@@ -50,9 +46,6 @@
#include "gmc_v9_0.h"
-#define V9_PIPE_PER_MEC (4)
-#define V9_QUEUES_PER_PIPE_MEC (8)
-
enum hqd_dequeue_request_type {
NO_ACTION = 0,
DRAIN_PIPE,
@@ -226,22 +219,21 @@ int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
- uint32_t base[2] = {
+ uint32_t sdma_engine_reg_base[2] = {
SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
SOC15_REG_OFFSET(SDMA1, 0,
mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL
};
- uint32_t retval;
-
- retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
- mmSDMA0_RLC0_RB_CNTL);
+ uint32_t retval = sdma_engine_reg_base[engine_id]
+ + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
- pr_debug("sdma base address: 0x%x\n", retval);
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, retval);
return retval;
}
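
On GFX9 the per-engine base is derived from SOC15 absolute offsets rather than a hard-coded stride: subtracting the legacy per-register index from the absolute RB_CNTL offset rebases the engine block, and the RLC1 − RLC0 delta serves as the queue stride. A standalone model with made-up offset values:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins: absolute SOC15 offsets of the first RLC
 * RB_CNTL register per engine, plus the legacy per-queue register
 * indices. None of these are the real GFX9 values. */
#define SOC15_SDMA0_RLC0_RB_CNTL 0x1200u
#define SOC15_SDMA1_RLC0_RB_CNTL 0x1800u
#define mm_RLC0_RB_CNTL          0x0060u
#define mm_RLC1_RB_CNTL          0x00A0u /* RLC1 - RLC0 = queue stride */

static uint32_t sdma_rlc_reg_offset(unsigned int engine_id,
				    unsigned int queue_id)
{
	/* Engine bases with the per-register index subtracted out, so
	 * callers can re-add the mmSDMA0_RLC0_* register they want. */
	uint32_t sdma_engine_reg_base[2] = {
		SOC15_SDMA0_RLC0_RB_CNTL - mm_RLC0_RB_CNTL,
		SOC15_SDMA1_RLC0_RB_CNTL - mm_RLC0_RB_CNTL,
	};

	return sdma_engine_reg_base[engine_id] +
	       queue_id * (mm_RLC1_RB_CNTL - mm_RLC0_RB_CNTL);
}

int main(void)
{
	printf("SDMA1 RLC2 -> 0x%x\n", sdma_rlc_reg_offset(1, 2));
	return 0;
}
```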
@@ -388,71 +380,67 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
+ uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
uint32_t data;
uint64_t data64;
uint64_t __user *wptr64 = (uint64_t __user *)wptr;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdmax_gfx_context_cntl = m->sdma_engine_id ?
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) :
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- data = RREG32(sdmax_gfx_context_cntl);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(sdmax_gfx_context_cntl, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
m->sdmax_rlcx_doorbell_offset);
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
if (read_user_wptr(mm, wptr64, data64)) {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
lower_32_bits(data64));
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
upper_32_bits(data64));
} else {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
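
The GFX9 write pointer is 64 bits wide and split across a lo/hi register pair, so the load path raises MINOR_PTR_UPDATE before writing both halves and clears it afterwards. The toy model below assumes the guard keeps hardware from consuming a half-written pointer; that reading is an inference, not something this diff states.

```c
#include <stdint.h>
#include <stdio.h>

/* Toy register file modelling the WPTR lo/hi pair and the
 * MINOR_PTR_UPDATE guard held while both halves are rewritten. */
static uint32_t wptr_lo, wptr_hi, minor_ptr_update;

static void write_wptr64(uint64_t wptr)
{
	minor_ptr_update = 1;			/* hold off consumption */
	wptr_lo = (uint32_t)(wptr & 0xFFFFFFFFu);
	wptr_hi = (uint32_t)(wptr >> 32);
	minor_ptr_update = 0;			/* both halves consistent */
}

int main(void)
{
	write_wptr64(0x100000002ull);
	printf("hi=0x%x lo=0x%x guard=%u\n", wptr_hi, wptr_lo,
	       minor_ptr_update);
	return 0;
}
```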
@@ -462,7 +450,8 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
uint32_t (**dump)[2], uint32_t *n_regs)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
@@ -472,15 +461,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
return -ENOMEM;
for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -514,14 +503,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -584,59 +573,52 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
m->sdmax_rlcx_rb_rptr_hi =
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
return 0;
}
-bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
+bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid)
{
- uint32_t reg;
+ uint32_t value;
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
-
-uint16_t kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
- reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
- + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid,
@@ -671,6 +653,8 @@ int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
int vmid, i;
+ uint16_t queried_pasid;
+ bool ret;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
uint32_t flush_type = 0;
@@ -686,14 +670,14 @@ int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
for (vmid = 0; vmid < 16; vmid++) {
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
continue;
- if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
- if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
- == pasid) {
- for (i = 0; i < adev->num_vmhubs; i++)
- amdgpu_gmc_flush_gpu_tlb(adev, vmid,
- i, flush_type);
- break;
- }
+
+ ret = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(kgd, vmid,
+ &queried_pasid);
+ if (ret && queried_pasid == pasid) {
+ for (i = 0; i < adev->num_vmhubs; i++)
+ amdgpu_gmc_flush_gpu_tlb(adev, vmid,
+ i, flush_type);
+ break;
}
}

@@ -777,15 +761,6 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
return 0;
}
-void kgd_gfx_v9_set_scratch_backing_va(struct kgd_dev *kgd,
- uint64_t va, uint32_t vmid)
-{
- /* No longer needed on GFXv9. The scratch base address is
- * passed to the shader by the CP. It's the user mode driver's
- * responsibility.
- */
-}
-
void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
@@ -811,7 +786,7 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmi
gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
}
-static const struct kfd2kgd_calls kfd2kgd = {
+const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
.init_interrupts = kgd_gfx_v9_init_interrupts,
@@ -827,19 +802,11 @@ static const struct kfd2kgd_calls kfd2kgd = {
.address_watch_execute = kgd_gfx_v9_address_watch_execute,
.wave_control_execute = kgd_gfx_v9_wave_control_execute,
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid =
- kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid =
- kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid,
- .set_scratch_backing_va = kgd_gfx_v9_set_scratch_backing_va,
+ .get_atc_vmid_pasid_mapping_info =
+ kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.get_tile_config = kgd_gfx_v9_get_tile_config,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
.invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
.invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index 26d8879bff9d..d9e9ad22b2bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -55,14 +55,10 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
unsigned int watch_point_id,
unsigned int reg_offset);
-bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid);
-uint16_t kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
+bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid);
void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base);
-void kgd_gfx_v9_set_scratch_backing_va(struct kgd_dev *kgd,
- uint64_t va, uint32_t vmid);
int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 6d021ecc8d59..d2e7b4fe5659 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -33,11 +33,6 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
-/* Special VM and GART address alignment needed for VI pre-Fiji due to
- * a HW bug.
- */
-#define VI_BO_SIZE_ALIGN (0x8000)
-
/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
@@ -349,13 +344,46 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
int ret;
- ret = amdgpu_vm_update_directories(adev, vm);
+ ret = amdgpu_vm_update_pdes(adev, vm, false);
if (ret)
return ret;
return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
}
+static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
+{
+ struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
+ bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT;
+ uint32_t mapping_flags;
+
+ mapping_flags = AMDGPU_VM_PAGE_READABLE;
+ if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE)
+ mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
+ if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE)
+ mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
+
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) {
+ if (bo_adev == adev)
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
+ else
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ } else {
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
+ }
+ break;
+ default:
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
+ }
+
+ return amdgpu_gem_va_map_flags(adev, mapping_flags);
+}
+
/* add_bo_to_vm - Add a BO to a VM
*
* Everything that needs to bo done only once when a BO is first added
@@ -404,8 +432,7 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
}
bo_va_entry->va = va;
- bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
- mem->mapping_flags);
+ bo_va_entry->pte_flags = get_pte_flags(adev, mem);
bo_va_entry->kgd_dev = (void *)adev;
list_add(&bo_va_entry->bo_list, list_bo_va);
@@ -586,7 +613,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
- false, &ctx->duplicates, true);
+ false, &ctx->duplicates);
if (!ret)
ctx->reserved = true;
else {
@@ -659,7 +686,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
}
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
- false, &ctx->duplicates, true);
+ false, &ctx->duplicates);
if (!ret)
ctx->reserved = true;
else
@@ -1079,10 +1106,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
- int byte_align;
u32 domain, alloc_domain;
u64 alloc_flags;
- uint32_t mapping_flags;
int ret;
/*
@@ -1135,25 +1160,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if ((*mem)->aql_queue)
size = size >> 1;
- /* Workaround for TLB bug on older VI chips */
- byte_align = (adev->family == AMDGPU_FAMILY_VI &&
- adev->asic_type != CHIP_FIJI &&
- adev->asic_type != CHIP_POLARIS10 &&
- adev->asic_type != CHIP_POLARIS11 &&
- adev->asic_type != CHIP_POLARIS12 &&
- adev->asic_type != CHIP_VEGAM) ?
- VI_BO_SIZE_ALIGN : 1;
-
- mapping_flags = AMDGPU_VM_PAGE_READABLE;
- if (flags & ALLOC_MEM_FLAGS_WRITABLE)
- mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
- if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
- mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
- if (flags & ALLOC_MEM_FLAGS_COHERENT)
- mapping_flags |= AMDGPU_VM_MTYPE_UC;
- else
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
- (*mem)->mapping_flags = mapping_flags;
+ (*mem)->alloc_flags = flags;
amdgpu_sync_create(&(*mem)->sync);
@@ -1168,7 +1175,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
memset(&bp, 0, sizeof(bp));
bp.size = size;
- bp.byte_align = byte_align;
+ bp.byte_align = 1;
bp.domain = alloc_domain;
bp.flags = alloc_flags;
bp.type = bo_type;
@@ -1626,9 +1633,10 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
INIT_LIST_HEAD(&(*mem)->bo_va_list);
mutex_init(&(*mem)->lock);
- (*mem)->mapping_flags =
- AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
- AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;
+ (*mem)->alloc_flags =
+ ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
+ ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;
(*mem)->bo = amdgpu_bo_ref(bo);
(*mem)->va = va;
@@ -1797,8 +1805,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
}
/* Reserve all BOs and page tables for validation */
- ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
- true);
+ ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
WARN(!list_empty(&duplicates), "Duplicates should be empty");
if (ret)
goto out_free;
@@ -1996,7 +2003,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
}
ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
- false, &duplicate_save, true);
+ false, &duplicate_save);
if (ret) {
pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
goto ttm_reserve_fail;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 1c9d40f97a9b..72232fccf61a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -2038,6 +2038,11 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
if (adev->is_atom_fw) {
amdgpu_atomfirmware_scratch_regs_init(adev);
amdgpu_atomfirmware_allocate_fb_scratch(adev);
+ ret = amdgpu_atomfirmware_get_mem_train_fb_loc(adev);
+ if (ret) {
+ DRM_ERROR("Failed to get mem train fb location.\n");
+ return ret;
+ }
} else {
amdgpu_atombios_scratch_regs_init(adev);
amdgpu_atombios_allocate_fb_scratch(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index daf687428cdb..ff4eb96bdfb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -27,6 +27,7 @@
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
+#include "soc15_hw_ip.h"
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
{
@@ -120,65 +121,14 @@ union vram_info {
struct atom_vram_info_header_v2_3 v23;
struct atom_vram_info_header_v2_4 v24;
};
-/*
- * Return vram width from integrated system info table, if available,
- * or 0 if not.
- */
-int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- int index;
- u16 data_offset, size;
- union igp_info *igp_info;
- union vram_info *vram_info;
- u32 mem_channel_number;
- u32 mem_channel_width;
- u8 frev, crev;
-
- if (adev->flags & AMD_IS_APU)
- index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- integratedsysteminfo);
- else
- index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- vram_info);
- /* get any igp specific overrides */
- if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- if (adev->flags & AMD_IS_APU) {
- igp_info = (union igp_info *)
- (mode_info->atom_context->bios + data_offset);
- switch (crev) {
- case 11:
- mem_channel_number = igp_info->v11.umachannelnumber;
- /* channel width is 64 */
- return mem_channel_number * 64;
- default:
- return 0;
- }
- } else {
- vram_info = (union vram_info *)
- (mode_info->atom_context->bios + data_offset);
- switch (crev) {
- case 3:
- mem_channel_number = vram_info->v23.vram_module[0].channel_num;
- mem_channel_width = vram_info->v23.vram_module[0].channel_width;
- return mem_channel_number * (1 << mem_channel_width);
- case 4:
- mem_channel_number = vram_info->v24.vram_module[0].channel_num;
- mem_channel_width = vram_info->v24.vram_module[0].channel_width;
- return mem_channel_number * (1 << mem_channel_width);
- default:
- return 0;
- }
- }
- }
-
- return 0;
-}
+union vram_module {
+ struct atom_vram_module_v9 v9;
+ struct atom_vram_module_v10 v10;
+};
-static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev,
- int atom_mem_type)
+static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
+ int atom_mem_type)
{
int vram_type;
@@ -219,19 +169,25 @@ static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev,
return vram_type;
}
-/*
- * Return vram type from either integrated system info table
- * or umc info table, if available, or 0 (TYPE_UNKNOWN) if not
- */
-int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
+
+int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type,
+ int *vram_vendor)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
- int index;
+ int index, i = 0;
u16 data_offset, size;
union igp_info *igp_info;
union vram_info *vram_info;
+ union vram_module *vram_module;
u8 frev, crev;
u8 mem_type;
+ u8 mem_vendor;
+ u32 mem_channel_number;
+ u32 mem_channel_width;
+ u32 module_id;
if (adev->flags & AMD_IS_APU)
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
@@ -239,6 +195,7 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
else
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
vram_info);
+
if (amdgpu_atom_parse_data_header(mode_info->atom_context,
index, &size,
&frev, &crev, &data_offset)) {
@@ -247,25 +204,67 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
(mode_info->atom_context->bios + data_offset);
switch (crev) {
case 11:
+ mem_channel_number = igp_info->v11.umachannelnumber;
+ /* channel width is 64 */
+ if (vram_width)
+ *vram_width = mem_channel_number * 64;
mem_type = igp_info->v11.memorytype;
- return convert_atom_mem_type_to_vram_type(adev, mem_type);
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
default:
- return 0;
+ return -EINVAL;
}
} else {
vram_info = (union vram_info *)
(mode_info->atom_context->bios + data_offset);
+ module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
switch (crev) {
case 3:
- mem_type = vram_info->v23.vram_module[0].memory_type;
- return convert_atom_mem_type_to_vram_type(adev, mem_type);
+ if (module_id > vram_info->v23.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module *)vram_info->v23.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module + vram_module->v9.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v9.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v9.channel_num;
+ mem_channel_width = vram_module->v9.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
case 4:
- mem_type = vram_info->v24.vram_module[0].memory_type;
- return convert_atom_mem_type_to_vram_type(adev, mem_type);
+ if (module_id > vram_info->v24.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module *)vram_info->v24.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module + vram_module->v10.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v10.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v10.channel_num;
+ mem_channel_width = vram_module->v10.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
default:
- return 0;
+ return -EINVAL;
}
}
+
}
return 0;
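
The new lookup no longer assumes vram_module[0]: the active module index comes from a BIOS scratch register, and each atom_vram_module record carries its own size, so the walk advances byte-wise through variable-length records. A compilable sketch of that walk; the struct below is a hypothetical stand-in for atom_vram_module_v9/v10.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical variable-length record in the spirit of
 * atom_vram_module_v9/v10: the first field is the record's own size. */
struct vram_module {
	uint16_t vram_module_size;
	uint8_t  memory_type;
	/* ...more fields, padded out to vram_module_size bytes... */
};

static const struct vram_module *nth_module(const uint8_t *table,
					    unsigned int module_id)
{
	const struct vram_module *m = (const struct vram_module *)table;

	while (module_id--)
		m = (const struct vram_module *)
			((const uint8_t *)m + m->vram_module_size);
	return m;
}

int main(void)
{
	union { uint8_t b[64]; uint32_t align; } blob = { {0} };
	struct vram_module m0 = { .vram_module_size = 16, .memory_type = 1 };
	struct vram_module m1 = { .vram_module_size = 16, .memory_type = 2 };

	memcpy(blob.b, &m0, sizeof(m0));
	memcpy(blob.b + 16, &m1, sizeof(m1));
	printf("module 1 memory_type = %u\n",
	       nth_module(blob.b, 1)->memory_type);
	return 0;
}
```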
@@ -464,3 +463,138 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
}
return -EINVAL;
}
+
+/*
+ * Check if VBIOS supports GDDR6 training data save/restore
+ */
+static bool gddr6_mem_train_vbios_support(struct amdgpu_device *adev)
+{
+ uint16_t data_offset;
+ int index;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+ if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
+ NULL, NULL, &data_offset)) {
+ struct atom_firmware_info_v3_1 *firmware_info =
+ (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
+ data_offset);
+
+ DRM_DEBUG("atom firmware capability:0x%08x.\n",
+ le32_to_cpu(firmware_info->firmware_capability));
+
+ if (le32_to_cpu(firmware_info->firmware_capability) &
+ ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING)
+ return true;
+ }
+
+ return false;
+}
+
+static int gddr6_mem_train_support(struct amdgpu_device *adev)
+{
+ int ret;
+ uint32_t major, minor, revision, hw_v;
+
+ if (gddr6_mem_train_vbios_support(adev)) {
+ amdgpu_discovery_get_ip_version(adev, MP0_HWID, &major, &minor, &revision);
+ hw_v = HW_REV(major, minor, revision);
+ /*
+ * Treat revision 0 as a special case: the MP0 and MMHUB registers are
+ * missing on some Navi10 A0 parts, which prevents the driver from
+ * discovering the hwip information. Since none of the functions will
+ * be initialized in that case, it should not cause any problems.
+ */
+ switch (hw_v) {
+ case HW_REV(11, 0, 0):
+ case HW_REV(11, 0, 5):
+ ret = 1;
+ break;
+ default:
+ DRM_ERROR("memory training vbios supports but psp hw(%08x)"
+ " doesn't support!\n", hw_v);
+ ret = -1;
+ break;
+ }
+ } else {
+ ret = 0;
+ hw_v = -1;
+ }
+
+ DRM_DEBUG("mp0 hw_v %08x, ret:%d.\n", hw_v, ret);
+ return ret;
+}
+
+int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev)
+{
+ struct atom_context *ctx = adev->mode_info.atom_context;
+ unsigned char *bios = ctx->bios;
+ struct vram_reserve_block *reserved_block;
+ int index, block_number;
+ uint8_t frev, crev;
+ uint16_t data_offset, size;
+ uint32_t start_address_in_kb;
+ uint64_t offset;
+ int ret;
+
+ adev->fw_vram_usage.mem_train_support = false;
+
+ if (adev->asic_type != CHIP_NAVI10 &&
+ adev->asic_type != CHIP_NAVI14)
+ return 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ ret = gddr6_mem_train_support(adev);
+ if (ret == -1)
+ return -EINVAL;
+ else if (ret == 0)
+ return 0;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ vram_usagebyfirmware);
+ ret = amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev,
+ &data_offset);
+ if (ret == 0) {
+ DRM_ERROR("parse data header failed.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("atom firmware common table header size:0x%04x, frev:0x%02x,"
+ " crev:0x%02x, data_offset:0x%04x.\n", size, frev, crev, data_offset);
+ /* only support 2.1+ */
+ if (((uint16_t)frev << 8 | crev) < 0x0201) {
+ DRM_ERROR("frev:0x%02x, crev:0x%02x < 2.1 !\n", frev, crev);
+ return -EINVAL;
+ }
+
+ reserved_block = (struct vram_reserve_block *)
+ (bios + data_offset + sizeof(struct atom_common_table_header));
+ block_number = ((unsigned int)size - sizeof(struct atom_common_table_header))
+ / sizeof(struct vram_reserve_block);
+ reserved_block += (block_number > 0) ? block_number-1 : 0;
+ DRM_DEBUG("block_number:0x%04x, last block: 0x%08xkb sz, %dkb fw, %dkb drv.\n",
+ block_number,
+ le32_to_cpu(reserved_block->start_address_in_kb),
+ le16_to_cpu(reserved_block->used_by_firmware_in_kb),
+ le16_to_cpu(reserved_block->used_by_driver_in_kb));
+ if (reserved_block->used_by_firmware_in_kb > 0) {
+ start_address_in_kb = le32_to_cpu(reserved_block->start_address_in_kb);
+ offset = (uint64_t)start_address_in_kb * ONE_KiB;
+ if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1)) {
+ offset -= ONE_MiB;
+ }
+
+ offset &= ~(ONE_MiB - 1);
+ adev->fw_vram_usage.mem_train_fb_loc = offset;
+ adev->fw_vram_usage.mem_train_support = true;
+ DRM_DEBUG("mem_train_fb_loc:0x%09llx.\n", offset);
+ ret = 0;
+ } else {
+ DRM_ERROR("used_by_firmware_in_kb is 0!\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
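
The reserved-block start is reported in KiB; the helper converts it to bytes and, when the address lands within 4 KiB past a MiB boundary, steps back a full MiB before rounding down, so the result is always MiB-aligned and sits below the reserved block. A worked example of the same arithmetic:

```c
#include <stdint.h>
#include <stdio.h>

#define ONE_KiB 0x400ull
#define ONE_MiB 0x100000ull

/* Same rounding as the kernel helper: if the reserved block starts
 * within 4 KiB past a MiB boundary, back off a full MiB first, then
 * round down to a MiB boundary. */
static uint64_t train_fb_loc(uint32_t start_address_in_kb)
{
	uint64_t offset = (uint64_t)start_address_in_kb * ONE_KiB;

	if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1))
		offset -= ONE_MiB;
	return offset & ~(ONE_MiB - 1);
}

int main(void)
{
	/* 0x1F4002 KiB = 0x7D000800 bytes, i.e. 2 KiB past a MiB
	 * boundary, so the result steps back to 0x7CF00000. */
	printf("0x%llx\n", (unsigned long long)train_fb_loc(0x1F4002));
	return 0;
}
```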
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index 5ec6f92f353c..f871af5ea6f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -29,8 +29,9 @@
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev);
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
-int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
-int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type, int *vram_vendor);
+int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 3e35a8f2c5e5..a97fb759e2f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -613,17 +613,7 @@ static bool amdgpu_atpx_detect(void)
bool d3_supported = false;
struct pci_dev *parent_pdev;
- while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
- vga_count++;
-
- has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
-
- parent_pdev = pci_upstream_bridge(pdev);
- d3_supported |= parent_pdev && parent_pdev->bridge_d3;
- amdgpu_atpx_get_quirks(pdev);
- }
-
- while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+ while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) {
vga_count++;
has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 7bcf86c61999..85b0515c0fdc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -140,7 +140,12 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
return 0;
error_free:
- while (i--) {
+ for (i = 0; i < last_entry; ++i) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
+
+ amdgpu_bo_unref(&bo);
+ }
+ for (i = first_userptr; i < num_entries; ++i) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
amdgpu_bo_unref(&bo);
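
This fixes an unwind bug: entries are populated into two disjoint index ranges (regular BOs from index 0, userptr BOs from first_userptr), so the old `while (i--)` walk back from the failure point could unreference slots that were never filled. A small model of the corrected two-range unwind, with illustrative names:

```c
#include <stdio.h>

/* Model of the corrected unwind: regular entries occupy [0, last_entry)
 * and userptr entries [first_userptr, num_entries); the slots in
 * between may never have been initialized. */
static void unref(int idx)
{
	printf("unref slot %d\n", idx);
}

static void unwind(int last_entry, int first_userptr, int num_entries)
{
	int i;

	for (i = 0; i < last_entry; ++i)		/* regular range */
		unref(i);
	for (i = first_userptr; i < num_entries; ++i)	/* userptr range */
		unref(i);
}

int main(void)
{
	/* 3 regular slots plus userptr slots 5..7 were taken before the
	 * failure; slots 3 and 4 were never filled and are skipped. */
	unwind(3, 5, 8);
	return 0;
}
```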
@@ -270,7 +275,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
r = amdgpu_bo_create_list_entry_array(&args->in, &info);
if (r)
- goto error_free;
+ return r;
switch (args->in.operation) {
case AMDGPU_BO_LIST_OP_CREATE:
@@ -283,8 +288,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
mutex_unlock(&fpriv->bo_list_lock);
if (r < 0) {
- amdgpu_bo_list_put(list);
- return r;
+ goto error_put_list;
}
handle = r;
@@ -306,9 +310,8 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&fpriv->bo_list_lock);
if (IS_ERR(old)) {
- amdgpu_bo_list_put(list);
r = PTR_ERR(old);
- goto error_free;
+ goto error_put_list;
}
amdgpu_bo_list_put(old);
@@ -325,8 +328,10 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
return 0;
+error_put_list:
+ amdgpu_bo_list_put(list);
+
error_free:
- if (info)
- kvfree(info);
+ kvfree(info);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index d8729285f731..a62cbc8199de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1019,8 +1019,12 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
*/
if (amdgpu_connector->shared_ddc && (ret == connector_status_connected)) {
struct drm_connector *list_connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *list_amdgpu_connector;
- list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(list_connector, &iter) {
if (connector == list_connector)
continue;
list_amdgpu_connector = to_amdgpu_connector(list_connector);
@@ -1037,6 +1041,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
}
}
}
+ drm_connector_list_iter_end(&iter);
}
}
}
@@ -1494,6 +1499,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
struct amdgpu_connector_atom_dig *amdgpu_dig_connector;
struct drm_encoder *encoder;
@@ -1508,10 +1514,12 @@ amdgpu_connector_add(struct amdgpu_device *adev,
return;
/* see if we already added it */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->connector_id == connector_id) {
amdgpu_connector->devices |= supported_device;
+ drm_connector_list_iter_end(&iter);
return;
}
if (amdgpu_connector->ddc_bus && i2c_bus->valid) {
@@ -1526,6 +1534,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
}
}
}
+ drm_connector_list_iter_end(&iter);
/* check if it's a dp bridge */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
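
Iterating mode_config.connector_list with a bare list_for_each_entry is unsafe against concurrent connector hotplug; the drm_connector_list_iter API brackets the walk so the core can take whatever locking or references it needs, and every exit path must reach drm_connector_list_iter_end(), as the early-return hunk above shows. A userspace model of that begin/iterate/end discipline:

```c
#include <stdio.h>

/* The iterator object owns whatever locking or refcounting the walk
 * needs, so every exit path must reach iter_end() (the kernel analogue
 * is drm_connector_list_iter_end()). All names here are illustrative. */
struct conn_iter { int pos, locked; };

static void iter_begin(struct conn_iter *it) { it->pos = 0; it->locked = 1; }
static int  iter_next(struct conn_iter *it)  { return it->pos < 3 ? it->pos++ : -1; }
static void iter_end(struct conn_iter *it)   { it->locked = 0; }

static int find_connector(int wanted)
{
	struct conn_iter it;
	int id, found = -1;

	iter_begin(&it);
	while ((id = iter_next(&it)) >= 0) {
		if (id == wanted) {
			found = id;
			break;	/* still falls through to iter_end() */
		}
	}
	iter_end(&it);
	return found;
}

int main(void)
{
	printf("found %d\n", find_connector(1));
	return 0;
}
```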
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 2e53feed40e2..a169ff16277f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -35,6 +35,7 @@
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
+#include "amdgpu_ras.h"
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
struct drm_amdgpu_cs_chunk_fence *data,
@@ -449,75 +450,12 @@ retry:
return r;
}
-/* Last resort, try to evict something from the current working set */
-static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
- struct amdgpu_bo *validated)
-{
- uint32_t domain = validated->allowed_domains;
- struct ttm_operation_ctx ctx = { true, false };
- int r;
-
- if (!p->evictable)
- return false;
-
- for (;&p->evictable->tv.head != &p->validated;
- p->evictable = list_prev_entry(p->evictable, tv.head)) {
-
- struct amdgpu_bo_list_entry *candidate = p->evictable;
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- bool update_bytes_moved_vis;
- uint32_t other;
-
- /* If we reached our current BO we can forget it */
- if (bo == validated)
- break;
-
- /* We can't move pinned BOs here */
- if (bo->pin_count)
- continue;
-
- other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
-
- /* Check if this BO is in one of the domains we need space for */
- if (!(other & domain))
- continue;
-
- /* Check if we can move this BO somewhere else */
- other = bo->allowed_domains & ~domain;
- if (!other)
- continue;
-
- /* Good we can try to move this BO somewhere else */
- update_bytes_moved_vis =
- !amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- amdgpu_bo_in_cpu_visible_vram(bo);
- amdgpu_bo_placement_from_domain(bo, other);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- p->bytes_moved += ctx.bytes_moved;
- if (update_bytes_moved_vis)
- p->bytes_moved_vis += ctx.bytes_moved;
-
- if (unlikely(r))
- break;
-
- p->evictable = list_prev_entry(p->evictable, tv.head);
- list_move(&candidate->tv.head, &p->validated);
-
- return true;
- }
-
- return false;
-}
-
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
struct amdgpu_cs_parser *p = param;
int r;
- do {
- r = amdgpu_cs_bo_validate(p, bo);
- } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
+ r = amdgpu_cs_bo_validate(p, bo);
if (r)
return r;
@@ -536,7 +474,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
list_for_each_entry(lobj, validated, tv.head) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
- bool binding_userptr = false;
struct mm_struct *usermm;
usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
@@ -553,20 +490,14 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
lobj->user_pages);
- binding_userptr = true;
}
- if (p->evictable == lobj)
- p->evictable = NULL;
-
r = amdgpu_cs_validate(p, bo);
if (r)
return r;
- if (binding_userptr) {
- kvfree(lobj->user_pages);
- lobj->user_pages = NULL;
- }
+ kvfree(lobj->user_pages);
+ lobj->user_pages = NULL;
}
return 0;
}
@@ -650,7 +581,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
- &duplicates, false);
+ &duplicates);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
@@ -661,9 +592,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
&p->bytes_moved_vis_threshold);
p->bytes_moved = 0;
p->bytes_moved_vis = 0;
- p->evictable = list_last_entry(&p->validated,
- struct amdgpu_bo_list_entry,
- tv.head);
r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
amdgpu_cs_validate, p);
@@ -915,7 +843,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_vm_update_directories(adev, vm);
+ r = amdgpu_vm_update_pdes(adev, vm, false);
if (r)
return r;
@@ -1359,6 +1287,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
bool reserved_buffers = false;
int i, r;
+ if (amdgpu_ras_intr_triggered())
+ return -EHWPOISON;
+
if (!adev->accel_working)
return -EBUSY;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 35a8d3c96fc9..08047bc4d588 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -80,7 +80,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
list_add(&csa_tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, false);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
if (r) {
DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 5652cc72ed3a..cb94627fc0f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1077,8 +1077,7 @@ failure:
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
- if (fences)
- kfree(fences);
+ kfree(fences);
return 0;
}
@@ -1103,8 +1102,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev)
{
- if (adev->debugfs_preempt)
- debugfs_remove(adev->debugfs_preempt);
+ debugfs_remove(adev->debugfs_preempt);
}
#else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 5a1939dbd4e3..46218b36dc9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -65,6 +65,8 @@
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
+#include <linux/suspend.h>
+
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -78,7 +80,7 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
-static const char *amdgpu_asic_name[] = {
+const char *amdgpu_asic_name[] = {
"TAHITI",
"PITCAIRN",
"VERDE",
@@ -151,6 +153,36 @@ bool amdgpu_device_is_px(struct drm_device *dev)
return false;
}
+/**
+ * VRAM access helper functions.
+ *
+ * amdgpu_device_vram_access - read/write a buffer in vram
+ *
+ * @adev: amdgpu_device pointer
+ * @pos: offset of the buffer in vram
+ * @buf: virtual address of the buffer in system memory
+ * @size: read/write size in bytes; @buf must hold at least @size bytes
+ * @write: true - write to vram, otherwise - read from vram
+ */
+void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
+ uint32_t *buf, size_t size, bool write)
+{
+ uint64_t last;
+ unsigned long flags;
+
+ last = size - 4;
+ for (last += pos; pos <= last; pos += 4) {
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
+ if (write)
+ WREG32_NO_KIQ(mmMM_DATA, *buf++);
+ else
+ *buf++ = RREG32_NO_KIQ(mmMM_DATA);
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ }
+}
+
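A usage sketch for the helper above (offset and buffer are hypothetical, shown only to illustrate the calling convention; @size is in bytes and the loop transfers one 32-bit word per iteration):

	uint32_t buf[64];

	/* read 256 bytes starting at VRAM offset 0 */
	amdgpu_device_vram_access(adev, 0, buf, sizeof(buf), false);

	/* write the same data back */
	amdgpu_device_vram_access(adev, 0, buf, sizeof(buf), true);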
/*
* MMIO register access helper functions.
*/
@@ -1023,12 +1055,6 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
amdgpu_device_check_block_size(adev);
- ret = amdgpu_device_get_job_timeout_settings(adev);
- if (ret) {
- dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
- return ret;
- }
-
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
return ret;
@@ -1469,6 +1495,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
+ goto parse_soc_bounding_box;
+
adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
@@ -1496,7 +1525,13 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
adev->gfx.config.num_packer_per_sc =
le32_to_cpu(gpu_info_fw->num_packer_per_sc);
}
+
+parse_soc_bounding_box:
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
+ /*
+ * SOC bounding box info is not integrated into the discovery table,
+ * so we always need to parse it from the gpu info firmware.
+ */
if (hdr->version_minor == 2) {
const struct gpu_info_firmware_v1_2 *gpu_info_fw =
(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
@@ -1613,6 +1648,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (r)
return r;
+ if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
+ amdgpu_discovery_get_gfx_info(adev);
+
amdgpu_amdkfd_device_probe(adev);
if (amdgpu_sriov_vf(adev)) {
@@ -1622,7 +1660,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
}
adev->pm.pp_feature = amdgpu_pp_feature_mask;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -2231,17 +2269,17 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
/* handle putting the SMC in the appropriate state */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
if (is_support_sw_smu(adev)) {
- /* todo */
+ r = smu_set_mp1_state(&adev->smu, adev->mp1_state);
} else if (adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->set_mp1_state) {
r = adev->powerplay.pp_funcs->set_mp1_state(
adev->powerplay.pp_handle,
adev->mp1_state);
- if (r) {
- DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
- adev->mp1_state, r);
- return r;
- }
+ }
+ if (r) {
+ DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
+ adev->mp1_state, r);
+ return r;
}
}
@@ -2556,6 +2594,73 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
adev->asic_reset_res, adev->ddev->unique);
}
+static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
+{
+ char *input = amdgpu_lockup_timeout;
+ char *timeout_setting = NULL;
+ int index = 0;
+ long timeout;
+ int ret = 0;
+
+ /*
+ * By default the timeout for non-compute jobs is 10000 ms,
+ * and there is no timeout enforced on compute jobs.
+ * In SR-IOV or passthrough mode, the timeout for compute
+ * jobs is also 10000 ms by default.
+ */
+ adev->gfx_timeout = msecs_to_jiffies(10000);
+ adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
+ if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
+ adev->compute_timeout = adev->gfx_timeout;
+ else
+ adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
+
+ if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) {
+ while ((timeout_setting = strsep(&input, ",")) &&
+ strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) {
+ ret = kstrtol(timeout_setting, 0, &timeout);
+ if (ret)
+ return ret;
+
+ if (timeout == 0) {
+ index++;
+ continue;
+ } else if (timeout < 0) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ } else {
+ timeout = msecs_to_jiffies(timeout);
+ }
+
+ switch (index++) {
+ case 0:
+ adev->gfx_timeout = timeout;
+ break;
+ case 1:
+ adev->compute_timeout = timeout;
+ break;
+ case 2:
+ adev->sdma_timeout = timeout;
+ break;
+ case 3:
+ adev->video_timeout = timeout;
+ break;
+ default:
+ break;
+ }
+ }
+ /*
+ * Only one value was specified, and it should
+ * apply to all non-compute jobs.
+ */
+ if (index == 1) {
+ adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
+ if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
+ adev->compute_timeout = adev->gfx_timeout;
+ }
+ }
+
+ return ret;
+}
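For illustration, a hypothetical module load with lockup_timeout=5000,0,-1 would be parsed by the loop above as:

	/*
	 * "5000,0,-1" (hypothetical input):
	 *   index 0: 5000 -> adev->gfx_timeout = msecs_to_jiffies(5000)
	 *   index 1: 0    -> adev->compute_timeout keeps its default
	 *   index 2: -1   -> adev->sdma_timeout = MAX_SCHEDULE_TIMEOUT
	 * adev->video_timeout keeps its default; no fourth value is given.
	 */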
/**
* amdgpu_device_init - initialize the driver
@@ -2583,7 +2688,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->ddev = ddev;
adev->pdev = pdev;
adev->flags = flags;
- adev->asic_type = flags & AMD_ASIC_MASK;
+
+ if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
+ adev->asic_type = amdgpu_force_asic_type;
+ else
+ adev->asic_type = flags & AMD_ASIC_MASK;
+
adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
if (amdgpu_emu_mode == 1)
adev->usec_timeout *= 2;
@@ -2726,6 +2836,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+ r = amdgpu_device_get_job_timeout_settings(adev);
+ if (r) {
+ dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
+ return r;
+ }
+
/* doorbell bar mapping and doorbell index init*/
amdgpu_device_doorbell_init(adev);
@@ -3007,6 +3123,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
struct amdgpu_device *adev;
struct drm_crtc *crtc;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
int r;
if (dev == NULL || dev->dev_private == NULL) {
@@ -3029,9 +3146,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
if (!amdgpu_device_has_dc_support(adev)) {
/* turn off display hw */
drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- }
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ drm_helper_connector_dpms(connector,
+ DRM_MODE_DPMS_OFF);
+ drm_connector_list_iter_end(&iter);
drm_modeset_unlock_all(dev);
/* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -3082,15 +3201,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
*/
amdgpu_bo_evict_vram(adev);
- pci_save_state(dev->pdev);
if (suspend) {
+ pci_save_state(dev->pdev);
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
- } else {
- r = amdgpu_asic_reset(adev);
- if (r)
- DRM_ERROR("amdgpu asic reset failed\n");
}
return 0;
@@ -3110,6 +3225,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_device *adev = dev->dev_private;
struct drm_crtc *crtc;
int r = 0;
@@ -3180,9 +3296,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
/* turn on display hw */
drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- }
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ drm_helper_connector_dpms(connector,
+ DRM_MODE_DPMS_ON);
+ drm_connector_list_iter_end(&iter);
+
drm_modeset_unlock_all(dev);
}
amdgpu_fbdev_set_suspend(adev, 0);
@@ -3628,11 +3748,6 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
break;
}
}
-
- list_for_each_entry(tmp_adev, device_list_handle,
- gmc.xgmi.head) {
- amdgpu_ras_reserve_bad_pages(tmp_adev);
- }
}
}
@@ -3736,25 +3851,18 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
adev->mp1_state = PP_MP1_STATE_NONE;
break;
}
- /* Block kfd: SRIOV would do it separately */
- if (!amdgpu_sriov_vf(adev))
- amdgpu_amdkfd_pre_reset(adev);
return true;
}
static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
- /*unlock kfd: SRIOV would do it separately */
- if (!amdgpu_sriov_vf(adev))
- amdgpu_amdkfd_post_reset(adev);
amdgpu_vf_error_trans_all(adev);
adev->mp1_state = PP_MP1_STATE_NONE;
adev->in_gpu_reset = 0;
mutex_unlock(&adev->lock_reset);
}
-
/**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
@@ -3774,11 +3882,24 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
struct amdgpu_hive_info *hive = NULL;
struct amdgpu_device *tmp_adev = NULL;
int i, r = 0;
+ bool in_ras_intr = amdgpu_ras_intr_triggered();
+
+ /*
+ * Flush RAM to disk so that after reboot
+ * the user can read the log and see why the system rebooted.
+ */
+ if (in_ras_intr && amdgpu_ras_get_context(adev)->reboot) {
+
+ DRM_WARN("Emergency reboot.");
+
+ ksys_sync_helper();
+ emergency_restart();
+ }
need_full_reset = job_signaled = false;
INIT_LIST_HEAD(&device_list);
- dev_info(adev->dev, "GPU reset begin!\n");
+ dev_info(adev->dev, "GPU %s begin!\n", in_ras_intr ? "jobs stop":"reset");
cancel_delayed_work_sync(&adev->delayed_init_work);
@@ -3805,9 +3926,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
return 0;
}
+ /* Block kfd: SRIOV would do it separately */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_amdkfd_pre_reset(adev);
+
/* Build list of devices to reset */
if (adev->gmc.xgmi.num_physical_nodes > 1) {
if (!hive) {
+ /*unlock kfd: SRIOV would do it separately */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_amdkfd_post_reset(adev);
amdgpu_device_unlock_adev(adev);
return -ENODEV;
}
@@ -3823,17 +3951,22 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
device_list_handle = &device_list;
}
- /*
- * Mark these ASICs to be reseted as untracked first
- * And add them back after reset completed
- */
- list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head)
- amdgpu_unregister_gpu_instance(tmp_adev);
-
/* block all schedulers and reset given job's ring */
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ if (tmp_adev != adev) {
+ amdgpu_device_lock_adev(tmp_adev, false);
+ if (!amdgpu_sriov_vf(tmp_adev))
+ amdgpu_amdkfd_pre_reset(tmp_adev);
+ }
+
+ /*
+ * Mark these ASICs to be reset as untracked first,
+ * and add them back after the reset completes.
+ */
+ amdgpu_unregister_gpu_instance(tmp_adev);
+
/* disable ras on ALL IPs */
- if (amdgpu_device_ip_need_full_reset(tmp_adev))
+ if (!in_ras_intr && amdgpu_device_ip_need_full_reset(tmp_adev))
amdgpu_ras_suspend(tmp_adev);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -3843,10 +3976,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
continue;
drm_sched_stop(&ring->sched, job ? &job->base : NULL);
+
+ if (in_ras_intr)
+ amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
}
}
+ if (in_ras_intr)
+ goto skip_sched_resume;
+
/*
* Must check guilty signal here since after this point all old
* HW fences are force signaled.
@@ -3857,9 +3996,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
dma_fence_is_signaled(job->base.s_fence->parent))
job_signaled = true;
- if (!amdgpu_device_ip_need_full_reset(adev))
- device_list_handle = &device_list;
-
if (job_signaled) {
dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
goto skip_hw_reset;
@@ -3881,7 +4017,6 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
if (tmp_adev == adev)
continue;
- amdgpu_device_lock_adev(tmp_adev, false);
r = amdgpu_device_pre_asic_reset(tmp_adev,
NULL,
&need_full_reset);
@@ -3909,6 +4044,7 @@ skip_hw_reset:
/* Post ASIC reset for all devs .*/
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
@@ -3930,12 +4066,18 @@ skip_hw_reset:
if (r) {
/* bad news, how to tell it to userspace ? */
- dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
+ dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
- dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
+ dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
}
+ }
+skip_sched_resume:
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ /*unlock kfd: SRIOV would do it separately */
+ if (!in_ras_intr && !amdgpu_sriov_vf(tmp_adev))
+ amdgpu_amdkfd_post_reset(tmp_adev);
amdgpu_device_unlock_adev(tmp_adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 1481899f86c1..f95092741c38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -134,20 +134,10 @@ static int hw_id_map[MAX_HWIP] = {
static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary)
{
- uint32_t *p = (uint32_t *)binary;
uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
- uint64_t pos = vram_size - BINARY_MAX_SIZE;
- unsigned long flags;
-
- while (pos < vram_size) {
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
- WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
- *p++ = RREG32_NO_KIQ(mmMM_DATA);
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- pos += 4;
- }
+ uint64_t pos = vram_size - DISCOVERY_TMR_SIZE;
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, DISCOVERY_TMR_SIZE, false);
return 0;
}
@@ -179,7 +169,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
uint16_t checksum;
int r;
- adev->discovery = kzalloc(BINARY_MAX_SIZE, GFP_KERNEL);
+ adev->discovery = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
if (!adev->discovery)
return -ENOMEM;
@@ -333,7 +323,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
}
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
- int *major, int *minor)
+ int *major, int *minor, int *revision)
{
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
@@ -369,6 +359,8 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
*major = ip->major;
if (minor)
*minor = ip->minor;
+ if (revision)
+ *revision = ip->revision;
return 0;
}
ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index 85b8c4d4d576..ba78e15d9b05 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -24,11 +24,13 @@
#ifndef __AMDGPU_DISCOVERY__
#define __AMDGPU_DISCOVERY__
+#define DISCOVERY_TMR_SIZE (64 << 10)
+
int amdgpu_discovery_init(struct amdgpu_device *adev);
void amdgpu_discovery_fini(struct amdgpu_device *adev);
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev);
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
- int *major, int *minor);
+ int *major, int *minor, int *revision);
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev);
#endif /* __AMDGPU_DISCOVERY__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 1d4aaa9580f4..d2dd59a95e8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -370,11 +370,13 @@ void amdgpu_display_print_display_setup(struct drm_device *dev)
struct amdgpu_connector *amdgpu_connector;
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
+ struct drm_connector_list_iter iter;
uint32_t devices;
int i = 0;
+ drm_connector_list_iter_begin(dev, &iter);
DRM_INFO("AMDGPU Display Connectors\n");
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
DRM_INFO("Connector %d:\n", i);
DRM_INFO(" %s\n", connector->name);
@@ -438,6 +440,7 @@ void amdgpu_display_print_display_setup(struct drm_device *dev)
}
i++;
}
+ drm_connector_list_iter_end(&iter);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 61f108ec2b5c..e2eec7b66334 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -34,27 +34,12 @@
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
+#include "amdgpu_dma_buf.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
/**
- * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
- * implementation
- * @obj: GEM buffer object (BO)
- *
- * Returns:
- * A scatter/gather table for the pinned pages of the BO's memory.
- */
-struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- int npages = bo->tbo.num_pages;
-
- return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
-}
-
-/**
* amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
* @obj: GEM BO
*
@@ -179,92 +164,126 @@ err_fences_put:
}
/**
- * amdgpu_dma_buf_map_attach - &dma_buf_ops.attach implementation
- * @dma_buf: Shared DMA buffer
+ * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
+ *
+ * @dmabuf: DMA-buf where we attach to
+ * @attach: attachment to add
+ *
+ * Add the attachment as user to the exported DMA-buf.
+ */
+static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ int r;
+
+ if (attach->dev->driver == adev->dev->driver)
+ return 0;
+
+ r = amdgpu_bo_reserve(bo, false);
+ if (unlikely(r != 0))
+ return r;
+
+ /*
+ * We only create shared fences for internal use, but importers
+ * of the dmabuf rely on exclusive fences for implicitly
+ * tracking write hazards. As any of the current fences may
+ * correspond to a write, we need to convert all existing
+ * fences on the reservation object into a single exclusive
+ * fence.
+ */
+ r = __dma_resv_make_exclusive(bo->tbo.base.resv);
+ if (r)
+ return r;
+
+ bo->prime_shared_count++;
+ amdgpu_bo_unreserve(bo);
+ return 0;
+}
+
+/**
+ * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
+ *
+ * @dmabuf: DMA-buf where we remove the attachment from
+ * @attach: the attachment to remove
+ *
+ * Called when an attachment is removed from the DMA-buf.
+ */
+static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+ if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
+ bo->prime_shared_count--;
+}
+
+/**
+ * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
* @attach: DMA-buf attachment
+ * @dir: DMA direction
*
* Makes sure that the shared DMA buffer can be accessed by the target device.
* For now, simply pins it to the GTT domain, where it should be accessible by
* all DMA devices.
*
* Returns:
- * 0 on success or a negative error code on failure.
+ * sg_table filled with the DMA addresses to use, or ERR_PTR with a negative error
+ * code.
*/
-static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf,
- struct dma_buf_attachment *attach)
+static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
{
+ struct dma_buf *dma_buf = attach->dmabuf;
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct sg_table *sgt;
long r;
- r = drm_gem_map_attach(dma_buf, attach);
- if (r)
- return r;
-
- r = amdgpu_bo_reserve(bo, false);
- if (unlikely(r != 0))
- goto error_detach;
-
-
- if (attach->dev->driver != adev->dev->driver) {
- /*
- * We only create shared fences for internal use, but importers
- * of the dmabuf rely on exclusive fences for implicitly
- * tracking write hazards. As any of the current fences may
- * correspond to a write, we need to convert all existing
- * fences on the reservation object into a single exclusive
- * fence.
- */
- r = __dma_resv_make_exclusive(bo->tbo.base.resv);
- if (r)
- goto error_unreserve;
- }
-
- /* pin buffer into GTT */
r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
if (r)
- goto error_unreserve;
+ return ERR_PTR(r);
- if (attach->dev->driver != adev->dev->driver)
- bo->prime_shared_count++;
+ sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
+ if (IS_ERR(sgt))
+ return sgt;
-error_unreserve:
- amdgpu_bo_unreserve(bo);
+ if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC))
+ goto error_free;
-error_detach:
- if (r)
- drm_gem_map_detach(dma_buf, attach);
- return r;
+ return sgt;
+
+error_free:
+ sg_free_table(sgt);
+ kfree(sgt);
+ return ERR_PTR(-ENOMEM);
}
/**
- * amdgpu_dma_buf_map_detach - &dma_buf_ops.detach implementation
- * @dma_buf: Shared DMA buffer
+ * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
* @attach: DMA-buf attachment
+ * @sgt: sg_table to unmap
+ * @dir: DMA direction
*
* This is called when a shared DMA buffer no longer needs to be accessible by
* another device. For now, simply unpins the buffer from GTT.
*/
-static void amdgpu_dma_buf_map_detach(struct dma_buf *dma_buf,
- struct dma_buf_attachment *attach)
+static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
{
- struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_gem_object *obj = attach->dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- int ret = 0;
-
- ret = amdgpu_bo_reserve(bo, true);
- if (unlikely(ret != 0))
- goto error;
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+ sg_free_table(sgt);
+ kfree(sgt);
amdgpu_bo_unpin(bo);
- if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
- bo->prime_shared_count--;
- amdgpu_bo_unreserve(bo);
-
-error:
- drm_gem_map_detach(dma_buf, attach);
}
/**
@@ -308,10 +327,11 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
}
const struct dma_buf_ops amdgpu_dmabuf_ops = {
- .attach = amdgpu_dma_buf_map_attach,
- .detach = amdgpu_dma_buf_map_detach,
- .map_dma_buf = drm_gem_map_dma_buf,
- .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .dynamic_mapping = true,
+ .attach = amdgpu_dma_buf_attach,
+ .detach = amdgpu_dma_buf_detach,
+ .map_dma_buf = amdgpu_dma_buf_map,
+ .unmap_dma_buf = amdgpu_dma_buf_unmap,
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
.mmap = drm_gem_dmabuf_mmap,
@@ -321,7 +341,6 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
/**
* amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
- * @dev: DRM device
* @gobj: GEM BO
* @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
*
@@ -350,31 +369,28 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
}
/**
- * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
- * implementation
+ * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
+ *
* @dev: DRM device
- * @attach: DMA-buf attachment
- * @sg: Scatter/gather table
+ * @dma_buf: DMA-buf
*
- * Imports shared DMA buffer memory exported by another device.
+ * Creates an empty SG BO for DMA-buf import.
*
* Returns:
* A new GEM BO of the given DRM device, representing the memory
* described by the given DMA-buf.
*/
-struct drm_gem_object *
-amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg)
+static struct drm_gem_object *
+amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
- struct dma_resv *resv = attach->dmabuf->resv;
+ struct dma_resv *resv = dma_buf->resv;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
int ret;
memset(&bp, 0, sizeof(bp));
- bp.size = attach->dmabuf->size;
+ bp.size = dma_buf->size;
bp.byte_align = PAGE_SIZE;
bp.domain = AMDGPU_GEM_DOMAIN_CPU;
bp.flags = 0;
@@ -385,11 +401,9 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
if (ret)
goto error;
- bo->tbo.sg = sg;
- bo->tbo.ttm->sg = sg;
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
- if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
bo->prime_shared_count = 1;
dma_resv_unlock(resv);
@@ -405,15 +419,15 @@ error:
* @dev: DRM device
* @dma_buf: Shared DMA buffer
*
- * The main work is done by the &drm_gem_prime_import helper, which in turn
- * uses &amdgpu_gem_prime_import_sg_table.
+ * Import a dma_buf into the driver and potentially create a new GEM object.
*
* Returns:
* GEM BO representing the shared DMA buffer for the given device.
*/
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
+ struct dma_buf *dma_buf)
{
+ struct dma_buf_attachment *attach;
struct drm_gem_object *obj;
if (dma_buf->ops == &amdgpu_dmabuf_ops) {
@@ -428,5 +442,17 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
}
}
- return drm_gem_prime_import(dev, dma_buf);
+ obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
+ if (IS_ERR(obj))
+ return obj;
+
+ attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true);
+ if (IS_ERR(attach)) {
+ drm_gem_object_put(obj);
+ return ERR_CAST(attach);
+ }
+
+ get_dma_buf(dma_buf);
+ obj->import_attach = attach;
+ return obj;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
index 5012e6ab58f1..ec447a7b6b28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
@@ -25,11 +25,6 @@
#include <drm/drm_gem.h>
-struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
-struct drm_gem_object *
-amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
int flags);
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index baf32484b820..7d16e6edda62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -43,6 +43,8 @@
#include "amdgpu_amdkfd.h"
+#include "amdgpu_ras.h"
+
/*
* KMS wrapper.
* - 3.0.0 - initial driver
@@ -87,8 +89,6 @@
#define KMS_DRIVER_MINOR 35
#define KMS_DRIVER_PATCHLEVEL 0
-#define AMDGPU_MAX_TIMEOUT_PARAM_LENTH 256
-
int amdgpu_vram_limit = 0;
int amdgpu_vis_vram_limit = 0;
int amdgpu_gart_size = -1; /* auto */
@@ -128,11 +128,7 @@ char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
/* OverDrive(bit 14) disabled by default*/
uint amdgpu_pp_feature_mask = 0xffffbfff;
-int amdgpu_ngg = 0;
-int amdgpu_prim_buf_per_se = 0;
-int amdgpu_pos_buf_per_se = 0;
-int amdgpu_cntl_sb_buf_per_se = 0;
-int amdgpu_param_buf_per_se = 0;
+uint amdgpu_force_long_training = 0;
int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1;
@@ -146,12 +142,13 @@ int amdgpu_mcbp = 0;
int amdgpu_discovery = -1;
int amdgpu_mes = 0;
int amdgpu_noretry = 1;
+int amdgpu_force_asic_type = -1;
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
};
int amdgpu_ras_enable = -1;
-uint amdgpu_ras_mask = 0xfffffffb;
+uint amdgpu_ras_mask = 0xffffffff;
/**
* DOC: vramlimit (int)
@@ -244,16 +241,21 @@ module_param_named(msi, amdgpu_msi, int, 0444);
*
* The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is, one or
* multiple values can be specified. 0 and negative values are invalid; they will be adjusted
- * to default timeout.
- * - With one value specified, the setting will apply to all non-compute jobs.
- * - With multiple values specified, the first one will be for GFX. The second one is for Compute.
- * And the third and fourth ones are for SDMA and Video.
+ * to the default timeout.
+ *
+ * - With one value specified, the setting will apply to all non-compute jobs.
+ * - With multiple values specified, the first one will be for GFX.
+ * The second one is for Compute. The third and fourth ones are
+ * for SDMA and Video.
+ *
* By default (with no lockup_timeout settings), the timeout for all non-compute (GFX, SDMA and Video)
* jobs is 10000. And there is no timeout enforced on compute jobs.
*/
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: 10000 for non-compute jobs and infinity timeout for compute jobs."
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and infinity timeout for compute jobs; "
+ "for passthrough or sriov, 10000 for all jobs."
" 0: keep default value. negative: infinity timeout), "
- "format is [Non-Compute] or [GFX,Compute,SDMA,Video]");
+ "format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
+ "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
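As an illustrative example of the bare-metal format (values are hypothetical), the parameter could be passed on the kernel command line as:

	amdgpu.lockup_timeout=10000,60000,10000,10000

which sets the GFX, Compute, SDMA and Video timeouts, in that order, in milliseconds.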
/**
@@ -392,6 +394,14 @@ MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
/**
+ * DOC: forcelongtraining (uint)
+ * Force long memory training in resume.
+ * The default is zero, which indicates short training in resume.
+ */
+MODULE_PARM_DESC(forcelongtraining, "force long memory training");
+module_param_named(forcelongtraining, amdgpu_force_long_training, uint, 0444);
+
+/**
* DOC: pcie_gen_cap (uint)
* Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
* The default is 0 (automatic for each asic).
@@ -449,42 +459,6 @@ MODULE_PARM_DESC(virtual_display,
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
/**
- * DOC: ngg (int)
- * Set to enable Next Generation Graphics (1 = enable). The default is 0 (disabled).
- */
-MODULE_PARM_DESC(ngg, "Next Generation Graphics (1 = enable, 0 = disable(default depending on gfx))");
-module_param_named(ngg, amdgpu_ngg, int, 0444);
-
-/**
- * DOC: prim_buf_per_se (int)
- * Override the size of Primitive Buffer per Shader Engine in Byte. The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(prim_buf_per_se, "the size of Primitive Buffer per Shader Engine (default depending on gfx)");
-module_param_named(prim_buf_per_se, amdgpu_prim_buf_per_se, int, 0444);
-
-/**
- * DOC: pos_buf_per_se (int)
- * Override the size of Position Buffer per Shader Engine in Byte. The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(pos_buf_per_se, "the size of Position Buffer per Shader Engine (default depending on gfx)");
-module_param_named(pos_buf_per_se, amdgpu_pos_buf_per_se, int, 0444);
-
-/**
- * DOC: cntl_sb_buf_per_se (int)
- * Override the size of Control Sideband per Shader Engine in Byte. The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(cntl_sb_buf_per_se, "the size of Control Sideband per Shader Engine (default depending on gfx)");
-module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
-
-/**
- * DOC: param_buf_per_se (int)
- * Override the size of Off-Chip Parameter Cache per Shader Engine in Byte.
- * The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)");
-module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
-
-/**
* DOC: job_hang_limit (int)
* Set how much time allow a job hang and not drop it. The default is 0.
*/
@@ -616,6 +590,16 @@ MODULE_PARM_DESC(noretry,
"Disable retry faults (0 = retry enabled, 1 = retry disabled (default))");
module_param_named(noretry, amdgpu_noretry, int, 0644);
+/**
+ * DOC: force_asic_type (int)
+ * A non-negative value used to specify the asic type for all supported GPUs.
+ */
+MODULE_PARM_DESC(force_asic_type,
+ "A non negative value used to specify the asic type for all supported GPUs");
+module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444);
+
#ifdef CONFIG_HSA_AMD
/**
* DOC: sched_policy (int)
@@ -1022,6 +1006,7 @@ static const struct pci_device_id pciidlist[] = {
/* Navi12 */
{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
{0, 0, 0}
};
@@ -1048,6 +1033,41 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENODEV;
}
+#ifdef CONFIG_DRM_AMDGPU_SI
+ if (!amdgpu_si_support) {
+ switch (flags & AMD_ASIC_MASK) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+ dev_info(&pdev->dev,
+ "SI support provided by radeon.\n");
+ dev_info(&pdev->dev,
+ "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
+ );
+ return -ENODEV;
+ }
+ }
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ if (!amdgpu_cik_support) {
+ switch (flags & AMD_ASIC_MASK) {
+ case CHIP_KAVERI:
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ dev_info(&pdev->dev,
+ "CIK support provided by radeon.\n");
+ dev_info(&pdev->dev,
+ "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
+ );
+ return -ENODEV;
+ }
+ }
+#endif
+
/* Get rid of things like offb */
ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb");
if (ret)
@@ -1092,7 +1112,10 @@ amdgpu_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- DRM_ERROR("Device removal is currently not supported outside of fbcon\n");
+#ifdef MODULE
+ if (THIS_MODULE->state != MODULE_STATE_GOING)
+#endif
+ DRM_ERROR("Hotplug removal is not supported\n");
drm_dev_unplug(dev);
drm_dev_put(dev);
pci_disable_device(pdev);
@@ -1105,6 +1128,9 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = dev->dev_private;
+ if (amdgpu_ras_intr_triggered())
+ return;
+
/* if we are running in a VM, make sure the device
* is torn down properly on reboot/shutdown.
* unfortunately we can't detect certain
@@ -1139,8 +1165,13 @@ static int amdgpu_pmops_resume(struct device *dev)
static int amdgpu_pmops_freeze(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_dev->dev_private;
+ int r;
- return amdgpu_device_suspend(drm_dev, false, true);
+ r = amdgpu_device_suspend(drm_dev, false, true);
+ if (r)
+ return r;
+ return amdgpu_asic_reset(adev);
}
static int amdgpu_pmops_thaw(struct device *dev)
@@ -1312,66 +1343,6 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
return 0;
}
-int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
-{
- char *input = amdgpu_lockup_timeout;
- char *timeout_setting = NULL;
- int index = 0;
- long timeout;
- int ret = 0;
-
- /*
- * By default timeout for non compute jobs is 10000.
- * And there is no timeout enforced on compute jobs.
- */
- adev->gfx_timeout = msecs_to_jiffies(10000);
- adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
-
- if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) {
- while ((timeout_setting = strsep(&input, ",")) &&
- strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) {
- ret = kstrtol(timeout_setting, 0, &timeout);
- if (ret)
- return ret;
-
- if (timeout == 0) {
- index++;
- continue;
- } else if (timeout < 0) {
- timeout = MAX_SCHEDULE_TIMEOUT;
- } else {
- timeout = msecs_to_jiffies(timeout);
- }
-
- switch (index++) {
- case 0:
- adev->gfx_timeout = timeout;
- break;
- case 1:
- adev->compute_timeout = timeout;
- break;
- case 2:
- adev->sdma_timeout = timeout;
- break;
- case 3:
- adev->video_timeout = timeout;
- break;
- default:
- break;
- }
- }
- /*
- * There is only one value specified and
- * it should apply to all non-compute jobs.
- */
- if (index == 1)
- adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- }
-
- return ret;
-}
-
static bool
amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
bool in_vblank_irq, int *vpos, int *hpos,
@@ -1410,8 +1381,6 @@ static struct drm_driver kms_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = amdgpu_gem_prime_export,
.gem_prime_import = amdgpu_gem_prime_import,
- .gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
- .gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
.gem_prime_vmap = amdgpu_gem_prime_vmap,
.gem_prime_vunmap = amdgpu_gem_prime_vunmap,
.gem_prime_mmap = amdgpu_gem_prime_mmap,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
index 571a6dfb473e..61fcf247a638 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
@@ -37,12 +37,14 @@ amdgpu_link_encoder_connector(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
+ drm_connector_list_iter_begin(dev, &iter);
/* walk the list and link encoders to connectors */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -55,6 +57,7 @@ amdgpu_link_encoder_connector(struct drm_device *dev)
}
}
}
+ drm_connector_list_iter_end(&iter);
}
void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
@@ -62,8 +65,10 @@ void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
amdgpu_encoder->active_device = amdgpu_encoder->devices & amdgpu_connector->devices;
@@ -72,6 +77,7 @@ void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
amdgpu_connector->devices, encoder->encoder_type);
}
}
+ drm_connector_list_iter_end(&iter);
}
struct drm_connector *
@@ -79,15 +85,20 @@ amdgpu_get_connector_for_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct drm_connector *connector;
+ struct drm_connector *connector, *found = NULL;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_encoder->active_device & amdgpu_connector->devices)
- return connector;
+ if (amdgpu_encoder->active_device & amdgpu_connector->devices) {
+ found = connector;
+ break;
+ }
}
- return NULL;
+ drm_connector_list_iter_end(&iter);
+ return found;
}
struct drm_connector *
@@ -95,15 +106,20 @@ amdgpu_get_connector_for_encoder_init(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct drm_connector *connector;
+ struct drm_connector *connector, *found = NULL;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_encoder->devices & amdgpu_connector->devices)
- return connector;
+ if (amdgpu_encoder->devices & amdgpu_connector->devices) {
+ found = connector;
+ break;
+ }
}
- return NULL;
+ drm_connector_list_iter_end(&iter);
+ return found;
}
struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 23085b352cf2..377fe20bce23 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -462,18 +462,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
timeout = adev->gfx_timeout;
break;
case AMDGPU_RING_TYPE_COMPUTE:
- /*
- * For non-sriov case, no timeout enforce
- * on compute ring by default. Unless user
- * specifies a timeout for compute ring.
- *
- * For sriov case, always use the timeout
- * as gfx ring
- */
- if (!amdgpu_sriov_vf(ring->adev))
- timeout = adev->compute_timeout;
- else
- timeout = adev->gfx_timeout;
+ timeout = adev->compute_timeout;
break;
case AMDGPU_RING_TYPE_SDMA:
timeout = adev->sdma_timeout;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 5e8bdded265f..19705e399905 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -71,7 +71,7 @@
*/
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
- struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;
+ struct page *dummy_page = ttm_bo_glob.dummy_read_page;
if (adev->dummy_page_addr)
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 8ceb44925947..4277125a79ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -175,7 +175,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false);
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
if (r) {
dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r);
@@ -527,13 +527,41 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
goto error;
}
- r = amdgpu_vm_update_directories(adev, vm);
+ r = amdgpu_vm_update_pdes(adev, vm, false);
error:
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
+/**
+ * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
+ *
+ * @adev: amdgpu_device pointer
+ * @flags: GEM UAPI flags
+ *
+ * Returns the GEM UAPI flags mapped into hardware for the ASIC.
+ */
+uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
+{
+ uint64_t pte_flag = 0;
+
+ if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ pte_flag |= AMDGPU_PTE_EXECUTABLE;
+ if (flags & AMDGPU_VM_PAGE_READABLE)
+ pte_flag |= AMDGPU_PTE_READABLE;
+ if (flags & AMDGPU_VM_PAGE_WRITEABLE)
+ pte_flag |= AMDGPU_PTE_WRITEABLE;
+ if (flags & AMDGPU_VM_PAGE_PRT)
+ pte_flag |= AMDGPU_PTE_PRT;
+
+ if (adev->gmc.gmc_funcs->map_mtype)
+ pte_flag |= amdgpu_gmc_map_mtype(adev,
+ flags & AMDGPU_VM_MTYPE_MASK);
+
+ return pte_flag;
+}
+
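A minimal usage sketch for the mapping helper above (the call site is hypothetical; the flag bits are the UAPI definitions from include/uapi/drm/amdgpu_drm.h):

	uint64_t pte;

	/* hardware PTE flags for a readable/writeable mapping */
	pte = amdgpu_gem_va_map_flags(adev, AMDGPU_VM_PAGE_READABLE |
					    AMDGPU_VM_PAGE_WRITEABLE);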
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -613,7 +641,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, false);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r)
goto error_unref;
@@ -631,7 +659,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
- va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
+ va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
va_flags);
@@ -646,7 +674,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->map_size);
break;
case AMDGPU_VA_OP_REPLACE:
- va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
+ va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
va_flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index 0b66d2e6b5d5..e0f025dd1b14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -67,6 +67,7 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index f9bef3154b99..069515f57c2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
+#include "amdgpu_ras.h"
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
@@ -231,12 +232,10 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
- int i, queue, pipe, me;
+ int i, queue, me;
for (i = 0; i < AMDGPU_MAX_GFX_QUEUES; ++i) {
queue = i % adev->gfx.me.num_queue_per_pipe;
- pipe = (i / adev->gfx.me.num_queue_per_pipe)
- % adev->gfx.me.num_pipe_per_me;
me = (i / adev->gfx.me.num_queue_per_pipe)
/ adev->gfx.me.num_pipe_per_me;
@@ -569,3 +568,102 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
mutex_unlock(&adev->gfx.gfx_off_mutex);
}
+
+int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "gfx_err_count",
+ .debugfs_name = "gfx_err_inject",
+ };
+ struct ras_ih_if ih_info = {
+ .cb = amdgpu_gfx_process_ras_data_cb,
+ };
+
+ if (!adev->gfx.ras_if) {
+ adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->gfx.ras_if)
+ return -ENOMEM;
+ adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
+ adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->gfx.ras_if->sub_block_index = 0;
+ strcpy(adev->gfx.ras_if->name, "gfx");
+ }
+ fs_info.head = ih_info.head = *adev->gfx.ras_if;
+
+ r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
+ &fs_info, &ih_info);
+ if (r)
+ goto free;
+
+ if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) {
+ r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
+ if (r)
+ goto late_fini;
+ } else {
+ /* free gfx ras_if if ras is not supported */
+ r = 0;
+ goto free;
+ }
+
+ return 0;
+late_fini:
+ amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info);
+free:
+ kfree(adev->gfx.ras_if);
+ adev->gfx.ras_if = NULL;
+ return r;
+}
+
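A usage sketch for the helper above (gfx_vX_0_late_init is a hypothetical name mirroring how a GFX IP block's late_init callback might wire up RAS):

	static int gfx_vX_0_late_init(void *handle)
	{
		struct amdgpu_device *adev = (struct amdgpu_device *)handle;

		/* register the GFX block with the RAS framework; on
		 * failure the helper frees adev->gfx.ras_if itself */
		return amdgpu_gfx_ras_late_init(adev);
	}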
+void amdgpu_gfx_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
+ adev->gfx.ras_if) {
+ struct ras_common_if *ras_if = adev->gfx.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ .cb = amdgpu_gfx_process_ras_data_cb,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
+
+int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry)
+{
+	/* TODO: a UE (uncorrectable error) will trigger an interrupt.
+	 *
+	 * When "Full RAS" is enabled, the per-IP interrupt sources should
+	 * be disabled and the driver should only look for the aggregated
+	 * interrupt via sync flood.
+	 */
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ if (adev->gfx.funcs->query_ras_error_count)
+ adev->gfx.funcs->query_ras_error_count(adev, err_data);
+ amdgpu_ras_reset_gpu(adev, 0);
+ }
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->gfx.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ DRM_ERROR("CP ECC ERROR IRQ\n");
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 6ee4021910e2..35eff9e6ce16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -201,28 +201,6 @@ struct amdgpu_gfx_funcs {
int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
};
-struct amdgpu_ngg_buf {
- struct amdgpu_bo *bo;
- uint64_t gpu_addr;
- uint32_t size;
- uint32_t bo_size;
-};
-
-enum {
- NGG_PRIM = 0,
- NGG_POS,
- NGG_CNTL,
- NGG_PARAM,
- NGG_BUF_MAX
-};
-
-struct amdgpu_ngg {
- struct amdgpu_ngg_buf buf[NGG_BUF_MAX];
- uint32_t gds_reserve_addr;
- uint32_t gds_reserve_size;
- bool init;
-};
-
struct sq_work {
struct work_struct work;
unsigned ih_data;
@@ -311,9 +289,6 @@ struct amdgpu_gfx {
uint32_t grbm_soft_reset;
uint32_t srbm_soft_reset;
- /* NGG */
- struct amdgpu_ngg ngg;
-
/* gfx off */
bool gfx_off_state; /* true: enabled, false: disabled */
struct mutex gfx_off_mutex;
@@ -384,5 +359,12 @@ void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
-
+int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
+int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 5790db61fa2c..a12f33c0f5df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -27,6 +27,8 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_xgmi.h"
/**
* amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
@@ -305,3 +307,29 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
gmc->fault_hash[hash].idx = gmc->last_fault++;
return false;
}
+
+int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (adev->umc.funcs && adev->umc.funcs->ras_late_init) {
+ r = adev->umc.funcs->ras_late_init(adev);
+ if (r)
+ return r;
+ }
+
+ if (adev->mmhub.funcs && adev->mmhub.funcs->ras_late_init) {
+ r = adev->mmhub.funcs->ras_late_init(adev);
+ if (r)
+ return r;
+ }
+
+ return amdgpu_xgmi_ras_late_init(adev);
+}
+
+void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
+{
+ amdgpu_umc_ras_fini(adev);
+ amdgpu_mmhub_ras_fini(adev);
+ amdgpu_xgmi_ras_fini(adev);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index b6e1d98ef01e..555d8e57fae9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -99,12 +99,15 @@ struct amdgpu_gmc_funcs {
unsigned pasid);
/* enable/disable PRT support */
void (*set_prt)(struct amdgpu_device *adev, bool enable);
- /* set pte flags based per asic */
- uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
- uint32_t flags);
+ /* map mtype to hardware flags */
+ uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags);
/* get the pde for a given mc addr */
void (*get_vm_pde)(struct amdgpu_device *adev, int level,
u64 *dst, u64 *flags);
+ /* get the pte flags to use for a BO VA mapping */
+ void (*get_vm_pte)(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags);
};
struct amdgpu_xgmi {
@@ -120,6 +123,7 @@ struct amdgpu_xgmi {
/* gpu list in the same hive */
struct list_head head;
bool supported;
+ struct ras_common_if *ras_if;
};
struct amdgpu_gmc {
@@ -153,6 +157,7 @@ struct amdgpu_gmc {
uint32_t fw_version;
struct amdgpu_irq_src vm_fault;
uint32_t vram_type;
+ uint8_t vram_vendor;
uint32_t srbm_soft_reset;
bool prt_warning;
uint64_t stolen_size;
@@ -177,15 +182,14 @@ struct amdgpu_gmc {
struct amdgpu_xgmi xgmi;
struct amdgpu_irq_src ecc_irq;
- struct ras_common_if *umc_ras_if;
- struct ras_common_if *mmhub_ras_if;
};
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
+#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
-#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
+#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
/**
* amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
@@ -230,5 +234,7 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc);
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
uint16_t pasid, uint64_t timestamp);
+int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 53734da1c2df..6f9289735e31 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -282,7 +282,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
!dma_fence_is_later(updates, (*id)->flushed_updates))
updates = NULL;
- if ((*id)->owner != vm->entity.fence_context ||
+ if ((*id)->owner != vm->direct.fence_context ||
job->vm_pd_addr != (*id)->pd_gpu_addr ||
updates || !(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
@@ -349,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
struct dma_fence *flushed;
/* Check all the prerequisites to using this VMID */
- if ((*id)->owner != vm->entity.fence_context)
+ if ((*id)->owner != vm->direct.fence_context)
continue;
if ((*id)->pd_gpu_addr != job->vm_pd_addr)
@@ -449,7 +449,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
}
id->pd_gpu_addr = job->vm_pd_addr;
- id->owner = vm->entity.fence_context;
+ id->owner = vm->direct.fence_context;
if (job->vm_needs_flush) {
dma_fence_put(id->last_flush);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 2a3f5ec298db..6f3b03f6224f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -87,10 +87,13 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
struct drm_device *dev = adev->ddev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
mutex_lock(&mode_config->mutex);
- list_for_each_entry(connector, &mode_config->connector_list, head)
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
amdgpu_connector_hotplug(connector);
+ drm_connector_list_iter_end(&iter);
mutex_unlock(&mode_config->mutex);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
@@ -153,6 +156,20 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
ret = amdgpu_ih_process(adev, &adev->irq.ih);
if (ret == IRQ_HANDLED)
pm_runtime_mark_last_busy(dev->dev);
+
+ /* For hardware that cannot enable the bif ring for both ras_controller_irq
+ * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
+ * register to check whether the interrupt is triggered or not, and properly
+ * ack the interrupt if it is there
+ */
+ if (adev->nbio.funcs &&
+ adev->nbio.funcs->handle_ras_controller_intr_no_bifring)
+ adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev);
+
+ if (adev->nbio.funcs &&
+ adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring)
+ adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
+
return ret;
}
@@ -228,10 +245,19 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
adev->irq.msi_enabled = false;
if (amdgpu_msi_ok(adev)) {
- int ret = pci_enable_msi(adev->pdev);
- if (!ret) {
+ int nvec = pci_msix_vec_count(adev->pdev);
+ unsigned int flags;
+
+ if (nvec <= 0) {
+ flags = PCI_IRQ_MSI;
+ } else {
+ flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
+ }
+ /* we only need one vector */
+ nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
+ if (nvec > 0) {
adev->irq.msi_enabled = true;
- dev_dbg(adev->dev, "amdgpu: using MSI.\n");
+ dev_dbg(adev->dev, "amdgpu: using MSI/MSI-X.\n");
}
}
@@ -254,7 +280,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
adev->irq.installed = true;
- r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
+ /* Use vector 0 for MSI-X */
+ r = drm_irq_install(adev->ddev, pci_irq_vector(adev->pdev, 0));
if (r) {
adev->irq.installed = false;
if (!amdgpu_device_has_dc_support(adev))
@@ -369,7 +396,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
* amdgpu_irq_dispatch - dispatch IRQ to IP blocks
*
* @adev: amdgpu device pointer
- * @entry: interrupt vector pointer
+ * @ih: interrupt ring instance
*
* Dispatches IRQ to IP blocks.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 9d76e0923a5a..e1bad992e83b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -246,6 +246,44 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
return fence;
}
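+
+/* map a job's spsc queue node back to the enclosing struct drm_sched_job */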
+#define to_drm_sched_job(sched_job) \
+ container_of((sched_job), struct drm_sched_job, queue_node)
+
+void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_job *s_job;
+ struct drm_sched_entity *s_entity = NULL;
+ int i;
+
+ /* Signal all jobs not yet scheduled */
+ for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+ if (!rq)
+ continue;
+
+ spin_lock(&rq->lock);
+ list_for_each_entry(s_entity, &rq->entities, list) {
+ while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+
+ dma_fence_signal(&s_fence->scheduled);
+ dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+ dma_fence_signal(&s_fence->finished);
+ }
+ }
+ spin_unlock(&rq->lock);
+ }
+
+ /* Signal all jobs already scheduled to HW */
+ list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+
+ dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+ dma_fence_signal(&s_fence->finished);
+ }
+}
+
const struct drm_sched_backend_ops amdgpu_sched_ops = {
.dependency = amdgpu_job_dependency,
.run_job = amdgpu_job_run,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 51e62504c279..dc7ee9358dcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -76,4 +76,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
void *owner, struct dma_fence **f);
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct dma_fence **fence);
+
+void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index f2c097983f48..137a8573d556 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -144,41 +144,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
struct amdgpu_device *adev;
int r, acpi_status;
-#ifdef CONFIG_DRM_AMDGPU_SI
- if (!amdgpu_si_support) {
- switch (flags & AMD_ASIC_MASK) {
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- case CHIP_OLAND:
- case CHIP_HAINAN:
- dev_info(dev->dev,
- "SI support provided by radeon.\n");
- dev_info(dev->dev,
- "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
- );
- return -ENODEV;
- }
- }
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
- if (!amdgpu_cik_support) {
- switch (flags & AMD_ASIC_MASK) {
- case CHIP_KAVERI:
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- case CHIP_KABINI:
- case CHIP_MULLINS:
- dev_info(dev->dev,
- "CIK support provided by radeon.\n");
- dev_info(dev->dev,
- "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
- );
- return -ENODEV;
- }
- }
-#endif
-
adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
if (adev == NULL) {
return -ENOMEM;
@@ -619,9 +584,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
struct drm_amdgpu_info_vram_gtt vram_gtt;
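+ /* usable sizes exclude pinned memory and, for VRAM, the VM-reserved
+ * region (AMDGPU_VM_RESERVED_VRAM)
+ */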
vram_gtt.vram_size = adev->gmc.real_vram_size -
- atomic64_read(&adev->vram_pin_size);
- vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
- atomic64_read(&adev->visible_pin_size);
+ atomic64_read(&adev->vram_pin_size) -
+ AMDGPU_VM_RESERVED_VRAM;
+ vram_gtt.vram_cpu_accessible_size =
+ min(adev->gmc.visible_vram_size -
+ atomic64_read(&adev->visible_pin_size),
+ vram_gtt.vram_size);
vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
vram_gtt.gtt_size *= PAGE_SIZE;
vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
@@ -634,15 +602,18 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
memset(&mem, 0, sizeof(mem));
mem.vram.total_heap_size = adev->gmc.real_vram_size;
mem.vram.usable_heap_size = adev->gmc.real_vram_size -
- atomic64_read(&adev->vram_pin_size);
+ atomic64_read(&adev->vram_pin_size) -
+ AMDGPU_VM_RESERVED_VRAM;
mem.vram.heap_usage =
amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
adev->gmc.visible_vram_size;
- mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
- atomic64_read(&adev->visible_pin_size);
+ mem.cpu_accessible_vram.usable_heap_size =
+ min(adev->gmc.visible_vram_size -
+ atomic64_read(&adev->visible_pin_size),
+ mem.vram.usable_heap_size);
mem.cpu_accessible_vram.heap_usage =
amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.cpu_accessible_vram.max_allocation =
@@ -764,17 +735,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.vce_harvest_config = adev->vce.harvest_config;
dev_info.gc_double_offchip_lds_buf =
adev->gfx.config.double_offchip_lds_buf;
-
- if (amdgpu_ngg) {
- dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
- dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
- dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
- dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
- dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
- dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
- dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
- dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
- }
dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
@@ -1003,6 +963,12 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
/* Ensure IB tests are run on ring */
flush_delayed_work(&adev->delayed_init_work);
+
+ if (amdgpu_ras_intr_triggered()) {
+ DRM_ERROR("RAS Intr triggered, device disabled!!");
+ return -EHWPOISON;
+ }
+
file_priv->driver_priv = NULL;
r = pm_runtime_get_sync(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
new file mode 100644
index 000000000000..676c48c02d77
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+
+int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "mmhub_err_count",
+ .debugfs_name = "mmhub_err_inject",
+ };
+
+ if (!adev->mmhub.ras_if) {
+ adev->mmhub.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->mmhub.ras_if)
+ return -ENOMEM;
+ adev->mmhub.ras_if->block = AMDGPU_RAS_BLOCK__MMHUB;
+ adev->mmhub.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mmhub.ras_if->sub_block_index = 0;
+ strcpy(adev->mmhub.ras_if->name, "mmhub");
+ }
+ ih_info.head = fs_info.head = *adev->mmhub.ras_if;
+ r = amdgpu_ras_late_init(adev, adev->mmhub.ras_if,
+ &fs_info, &ih_info);
+ if (r || !amdgpu_ras_is_supported(adev, adev->mmhub.ras_if->block)) {
+ kfree(adev->mmhub.ras_if);
+ adev->mmhub.ras_if = NULL;
+ }
+
+ return r;
+}
+
+void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
+ adev->mmhub.ras_if) {
+ struct ras_common_if *ras_if = adev->mmhub.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 2d75ecfa199b..1cd78940cf82 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -23,9 +23,17 @@
struct amdgpu_mmhub_funcs {
void (*ras_init)(struct amdgpu_device *adev);
+ int (*ras_late_init)(struct amdgpu_device *adev);
void (*query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
};
+struct amdgpu_mmhub {
+ struct ras_common_if *ras_if;
+ const struct amdgpu_mmhub_funcs *funcs;
+};
+
+int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 31d4deb5d294..392300f77b13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -136,6 +136,7 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
* amdgpu_mn_read_lock - take the read side lock for this notifier
*
* @amn: our notifier
+ * @blockable: is the notifier blockable
*/
static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
new file mode 100644
index 000000000000..7d5c3a9de9ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+
+int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "pcie_bif_err_count",
+ .debugfs_name = "pcie_bif_err_inject",
+ };
+
+ if (!adev->nbio.ras_if) {
+ adev->nbio.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->nbio.ras_if)
+ return -ENOMEM;
+ adev->nbio.ras_if->block = AMDGPU_RAS_BLOCK__PCIE_BIF;
+ adev->nbio.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->nbio.ras_if->sub_block_index = 0;
+ strcpy(adev->nbio.ras_if->name, "pcie_bif");
+ }
+ ih_info.head = fs_info.head = *adev->nbio.ras_if;
+ r = amdgpu_ras_late_init(adev, adev->nbio.ras_if,
+ &fs_info, &ih_info);
+ if (r)
+ goto free;
+
+ if (amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
+ r = amdgpu_irq_get(adev, &adev->nbio.ras_controller_irq, 0);
+ if (r)
+ goto late_fini;
+ r = amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0);
+ if (r)
+ goto late_fini;
+ } else {
+ r = 0;
+ goto free;
+ }
+
+ return 0;
+late_fini:
+ amdgpu_ras_late_fini(adev, adev->nbio.ras_if, &ih_info);
+free:
+ kfree(adev->nbio.ras_if);
+ adev->nbio.ras_if = NULL;
+ return r;
+}
+
+void amdgpu_nbio_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF) &&
+ adev->nbio.ras_if) {
+ struct ras_common_if *ras_if = adev->nbio.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
new file mode 100644
index 000000000000..919bd566ba3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_NBIO_H__
+#define __AMDGPU_NBIO_H__
+
+/*
+ * amdgpu nbio functions
+ */
+struct nbio_hdp_flush_reg {
+ u32 ref_and_mask_cp0;
+ u32 ref_and_mask_cp1;
+ u32 ref_and_mask_cp2;
+ u32 ref_and_mask_cp3;
+ u32 ref_and_mask_cp4;
+ u32 ref_and_mask_cp5;
+ u32 ref_and_mask_cp6;
+ u32 ref_and_mask_cp7;
+ u32 ref_and_mask_cp8;
+ u32 ref_and_mask_cp9;
+ u32 ref_and_mask_sdma0;
+ u32 ref_and_mask_sdma1;
+ u32 ref_and_mask_sdma2;
+ u32 ref_and_mask_sdma3;
+ u32 ref_and_mask_sdma4;
+ u32 ref_and_mask_sdma5;
+ u32 ref_and_mask_sdma6;
+ u32 ref_and_mask_sdma7;
+};
+
+struct amdgpu_nbio_funcs {
+ const struct nbio_hdp_flush_reg *hdp_flush_reg;
+ u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
+ u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
+ u32 (*get_rev_id)(struct amdgpu_device *adev);
+ void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
+ void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+ u32 (*get_memsize)(struct amdgpu_device *adev);
+ void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
+ bool use_doorbell, int doorbell_index, int doorbell_size);
+ void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell,
+ int doorbell_index, int instance);
+ void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
+ bool enable);
+ void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
+ bool enable);
+ void (*ih_doorbell_range)(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index);
+ void (*enable_doorbell_interrupt)(struct amdgpu_device *adev,
+ bool enable);
+ void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+ bool enable);
+ void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
+ bool enable);
+ void (*get_clockgating_state)(struct amdgpu_device *adev,
+ u32 *flags);
+ void (*ih_control)(struct amdgpu_device *adev);
+ void (*init_registers)(struct amdgpu_device *adev);
+ void (*detect_hw_virt)(struct amdgpu_device *adev);
+ void (*remap_hdp_registers)(struct amdgpu_device *adev);
+ void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
+ void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
+ int (*init_ras_controller_interrupt)(struct amdgpu_device *adev);
+ int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev);
+ void (*query_ras_error_count)(struct amdgpu_device *adev,
+ void *ras_error_status);
+ int (*ras_late_init)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_nbio {
+ const struct nbio_hdp_flush_reg *hdp_flush_reg;
+ struct amdgpu_irq_src ras_controller_irq;
+ struct amdgpu_irq_src ras_err_event_athub_irq;
+ struct ras_common_if *ras_if;
+ const struct amdgpu_nbio_funcs *funcs;
+};
+
+int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_nbio_ras_fini(struct amdgpu_device *adev);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 1fead0e8b890..e3f16b49e970 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -343,6 +343,70 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
}
/**
+ * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
+ *
+ * @adev: amdgpu device object
+ * @offset: offset of the BO
+ * @size: size of the BO
+ * @domain: where to place it
+ * @bo_ptr: used to initialize BOs in structures
+ * @cpu_addr: optional CPU address mapping
+ *
+ * Creates a kernel BO at a specific offset in the address space of the domain.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
+ uint64_t offset, uint64_t size, uint32_t domain,
+ struct amdgpu_bo **bo_ptr, void **cpu_addr)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ unsigned int i;
+ int r;
+
+ offset &= PAGE_MASK;
+ size = ALIGN(size, PAGE_SIZE);
+
+ r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
+ NULL, cpu_addr);
+ if (r)
+ return r;
+
+ /*
+ * Remove the original mem node and create a new one at the requested
+ * position.
+ */
+ if (cpu_addr)
+ amdgpu_bo_kunmap(*bo_ptr);
+
+ ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
+
+ for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
+ (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
+ (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
+ }
+ r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
+ &(*bo_ptr)->tbo.mem, &ctx);
+ if (r)
+ goto error;
+
+ if (cpu_addr) {
+ r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
+ if (r)
+ goto error;
+ }
+
+ amdgpu_bo_unreserve(*bo_ptr);
+ return 0;
+
+error:
+ amdgpu_bo_unreserve(*bo_ptr);
+ amdgpu_bo_unref(bo_ptr);
+ return r;
+}
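+
+/*
+ * Usage sketch (hypothetical offset and variable names): pin a page-sized
+ * scratch buffer at a fixed VRAM offset and map it for CPU access.
+ *
+ * struct amdgpu_bo *bo;
+ * void *ptr;
+ * int r;
+ *
+ * r = amdgpu_bo_create_kernel_at(adev, 0x8000, PAGE_SIZE,
+ *                                AMDGPU_GEM_DOMAIN_VRAM, &bo, &ptr);
+ */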
+
+/**
* amdgpu_bo_free_kernel - free BO for kernel use
*
* @bo: amdgpu BO to free
@@ -451,9 +515,10 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
{
struct ttm_operation_ctx ctx = {
.interruptible = (bp->type != ttm_bo_type_kernel),
- .no_wait_gpu = false,
+ .no_wait_gpu = bp->no_wait_gpu,
.resv = bp->resv,
- .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
+ .flags = bp->type != ttm_bo_type_kernel ?
+ TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
};
struct amdgpu_bo *bo;
unsigned long page_align, size = bp->size;
@@ -1058,7 +1123,10 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
struct vm_area_struct *vma)
{
- return ttm_fbdev_mmap(vma, &bo->tbo);
+ if (vma->vm_pgoff != 0)
+ return -EACCES;
+
+ return ttm_bo_mmap_obj(vma, &bo->tbo);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 658f4c9779b7..7e99f6c58c48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -41,6 +41,7 @@ struct amdgpu_bo_param {
u32 preferred_domain;
u64 flags;
enum ttm_bo_type type;
+ bool no_wait_gpu;
struct dma_resv *resv;
};
@@ -237,6 +238,9 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
u64 *gpu_addr, void **cpu_addr);
+int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
+ uint64_t offset, uint64_t size, uint32_t domain,
+ struct amdgpu_bo **bo_ptr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 03930313c263..d63866164496 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -805,8 +805,7 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
}
/**
- * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk
- * pp_dpm_pcie
+ * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
*
* The amdgpu driver provides a sysfs API for adjusting what power levels
* are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
@@ -822,9 +821,15 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
*
* To manually adjust these states, first select manual using
* power_dpm_force_performance_level.
- * Secondly,Enter a new value for each level by inputing a string that
+ * Secondly, enter a new value for each level by inputting a string that
* contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie"
- * E.g., echo 4 5 6 to > pp_dpm_sclk will enable sclk levels 4, 5, and 6.
+ * E.g.,
+ *
+ * .. code-block:: bash
+ *
+ * echo "4 5 6" > pp_dpm_sclk
+ *
+ * will enable sclk levels 4, 5, and 6.
*
 * NOTE: changing the dcefclk max dpm level is not supported now
*/
@@ -2196,9 +2201,9 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
*
* - fan1_input: fan speed in RPM
*
- * - fan[1-*]_target: Desired fan speed Unit: revolution/min (RPM)
+ * - fan[1-\*]_target: Desired fan speed. Unit: revolution/min (RPM)
*
- * - fan[1-*]_enable: Enable or disable the sensors.1: Enable 0: Disable
+ * - fan[1-\*]_enable: Enable or disable the sensors.1: Enable 0: Disable
*
* hwmon interfaces for GPU clocks:
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 4d71537a960d..b996b5bc5804 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -88,6 +88,17 @@ static int psp_sw_init(void *handle)
return ret;
}
+ ret = psp_mem_training_init(psp);
+ if (ret) {
+ DRM_ERROR("Failed to initliaze memory training!\n");
+ return ret;
+ }
+ ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
+ if (ret) {
+ DRM_ERROR("Failed to process memory training!\n");
+ return ret;
+ }
+
return 0;
}
@@ -95,6 +106,7 @@ static int psp_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ psp_mem_training_fini(&adev->psp);
release_firmware(adev->psp.sos_fw);
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
@@ -151,10 +163,12 @@ psp_cmd_submit_buf(struct psp_context *psp,
return ret;
}
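+ /* invalidate HDP so the CPU polling below sees the latest fence value
+ * written by the PSP
+ */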
+ amdgpu_asic_invalidate_hdp(psp->adev, NULL);
while (*((unsigned int *)psp->fence_buf) != index) {
if (--timeout == 0)
break;
msleep(1);
+ amdgpu_asic_invalidate_hdp(psp->adev, NULL);
}
 /* In some cases, psp response status is not 0 even when there is no
@@ -168,8 +182,9 @@ psp_cmd_submit_buf(struct psp_context *psp,
if (ucode)
DRM_WARN("failed to load ucode id (%d) ",
ucode->ucode_id);
- DRM_WARN("psp command failed and response status is (0x%X)\n",
- psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK);
+ DRM_DEBUG_DRIVER("psp command (0x%X) failed and response status is (0x%X)\n",
+ psp->cmd_buf_mem->cmd_id,
+ psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK);
if (!timeout) {
mutex_unlock(&psp->mutex);
return -EINVAL;
@@ -253,7 +268,8 @@ static int psp_tmr_init(struct psp_context *psp)
/* For ASICs support RLC autoload, psp will parse the toc
* and calculate the total size of TMR needed */
- if (psp->toc_start_addr &&
+ if (!amdgpu_sriov_vf(psp->adev) &&
+ psp->toc_start_addr &&
psp->toc_bin_size &&
psp->fw_pri_buf) {
ret = psp_load_toc(psp, &tmr_size);
@@ -287,15 +303,9 @@ static int psp_tmr_load(struct psp_context *psp)
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
- if (ret)
- goto failed;
kfree(cmd);
- return 0;
-
-failed:
- kfree(cmd);
return ret;
}
@@ -772,6 +782,324 @@ static int psp_ras_initialize(struct psp_context *psp)
}
// ras end
+// HDCP start
+static void psp_prep_hdcp_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t hdcp_ta_mc,
+ uint64_t hdcp_mc_shared,
+ uint32_t hdcp_ta_size,
+ uint32_t shared_size)
+{
+ cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+ cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(hdcp_ta_mc);
+ cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(hdcp_ta_mc);
+ cmd->cmd.cmd_load_ta.app_len = hdcp_ta_size;
+
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
+ lower_32_bits(hdcp_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
+ upper_32_bits(hdcp_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+}
+
+static int psp_hdcp_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16k of memory, aligned to 4k, from the frame buffer (local
+ * physical memory) for the hdcp ta <-> driver shared buffer
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->hdcp_context.hdcp_shared_bo,
+ &psp->hdcp_context.hdcp_shared_mc_addr,
+ &psp->hdcp_context.hdcp_shared_buf);
+
+ return ret;
+}
+
+static int psp_hdcp_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr,
+ psp->ta_hdcp_ucode_size);
+
+ psp_prep_hdcp_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+ psp->hdcp_context.hdcp_shared_mc_addr,
+ psp->ta_hdcp_ucode_size,
+ PSP_HDCP_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ if (!ret) {
+ psp->hdcp_context.hdcp_initialized = 1;
+ psp->hdcp_context.session_id = cmd->resp.session_id;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_hdcp_initialize(struct psp_context *psp)
+{
+ int ret;
+
+ if (!psp->hdcp_context.hdcp_initialized) {
+ ret = psp_hdcp_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_hdcp_load(psp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void psp_prep_hdcp_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t hdcp_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
+ cmd->cmd.cmd_unload_ta.session_id = hdcp_session_id;
+}
+
+static int psp_hdcp_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the unloading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_hdcp_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static void psp_prep_hdcp_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ta_cmd_id,
+ uint32_t hdcp_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+ cmd->cmd.cmd_invoke_cmd.session_id = hdcp_session_id;
+ cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+ /* Note: cmd_invoke_cmd.buf is not used for now */
+}
+
+int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_hdcp_ta_invoke_cmd_buf(cmd, ta_cmd_id,
+ psp->hdcp_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_hdcp_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ if (!psp->hdcp_context.hdcp_initialized)
+ return 0;
+
+ ret = psp_hdcp_unload(psp);
+ if (ret)
+ return ret;
+
+ psp->hdcp_context.hdcp_initialized = 0;
+
+ /* free hdcp shared memory */
+ amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
+ &psp->hdcp_context.hdcp_shared_mc_addr,
+ &psp->hdcp_context.hdcp_shared_buf);
+
+ return 0;
+}
+// HDCP end
+
+// DTM start
+static void psp_prep_dtm_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t dtm_ta_mc,
+ uint64_t dtm_mc_shared,
+ uint32_t dtm_ta_size,
+ uint32_t shared_size)
+{
+ cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+ cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(dtm_ta_mc);
+ cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(dtm_ta_mc);
+ cmd->cmd.cmd_load_ta.app_len = dtm_ta_size;
+
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(dtm_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(dtm_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+}
+
+static int psp_dtm_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16k of memory, aligned to 4k, from the frame buffer (local
+ * physical memory) for the dtm ta <-> driver shared buffer
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->dtm_context.dtm_shared_bo,
+ &psp->dtm_context.dtm_shared_mc_addr,
+ &psp->dtm_context.dtm_shared_buf);
+
+ return ret;
+}
+
+static int psp_dtm_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
+
+ psp_prep_dtm_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+ psp->dtm_context.dtm_shared_mc_addr,
+ psp->ta_dtm_ucode_size,
+ PSP_DTM_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ if (!ret) {
+ psp->dtm_context.dtm_initialized = 1;
+ psp->dtm_context.session_id = cmd->resp.session_id;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_dtm_initialize(struct psp_context *psp)
+{
+ int ret;
+
+ if (!psp->dtm_context.dtm_initialized) {
+ ret = psp_dtm_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_dtm_load(psp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void psp_prep_dtm_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ta_cmd_id,
+ uint32_t dtm_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+ cmd->cmd.cmd_invoke_cmd.session_id = dtm_session_id;
+ cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+ /* Note: cmd_invoke_cmd.buf is not used for now */
+}
+
+int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_dtm_ta_invoke_cmd_buf(cmd, ta_cmd_id,
+ psp->dtm_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
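+/*
+ * Unload the DTM TA using the DTM session id. A minimal sketch mirroring
+ * psp_hdcp_unload() above; psp_prep_hdcp_ta_unload_cmd_buf() only sets the
+ * generic TA unload command id and the given session id, so it is reused
+ * here.
+ */
+static int psp_dtm_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the unloading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_hdcp_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+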
+static int psp_dtm_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ if (!psp->dtm_context.dtm_initialized)
+ return 0;
+
+ ret = psp_dtm_unload(psp);
+ if (ret)
+ return ret;
+
+ psp->dtm_context.dtm_initialized = 0;
+
+ /* free dtm shared memory */
+ amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
+ &psp->dtm_context.dtm_shared_mc_addr,
+ &psp->dtm_context.dtm_shared_buf);
+
+ return 0;
+}
+// DTM end
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -845,6 +1173,16 @@ static int psp_hw_start(struct psp_context *psp)
if (ret)
dev_err(psp->adev->dev,
"RAS: Failed to initialize RAS\n");
+
+ ret = psp_hdcp_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "HDCP: Failed to initialize HDCP\n");
+
+ ret = psp_dtm_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "DTM: Failed to initialize DTM\n");
}
return 0;
@@ -950,21 +1288,7 @@ static void psp_print_fw_hdr(struct psp_context *psp,
struct amdgpu_firmware_info *ucode)
{
struct amdgpu_device *adev = psp->adev;
- const struct sdma_firmware_header_v1_0 *sdma_hdr =
- (const struct sdma_firmware_header_v1_0 *)
- adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
- const struct gfx_firmware_header_v1_0 *ce_hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- const struct gfx_firmware_header_v1_0 *pfp_hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- const struct gfx_firmware_header_v1_0 *me_hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- const struct gfx_firmware_header_v1_0 *mec_hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- const struct rlc_firmware_header_v2_0 *rlc_hdr =
- (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
- const struct smc_firmware_header_v1_0 *smc_hdr =
- (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+ struct common_firmware_header *hdr;
switch (ucode->ucode_id) {
case AMDGPU_UCODE_ID_SDMA0:
@@ -975,25 +1299,33 @@ static void psp_print_fw_hdr(struct psp_context *psp,
case AMDGPU_UCODE_ID_SDMA5:
case AMDGPU_UCODE_ID_SDMA6:
case AMDGPU_UCODE_ID_SDMA7:
- amdgpu_ucode_print_sdma_hdr(&sdma_hdr->header);
+ hdr = (struct common_firmware_header *)
+ adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
+ amdgpu_ucode_print_sdma_hdr(hdr);
break;
case AMDGPU_UCODE_ID_CP_CE:
- amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
+ hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
break;
case AMDGPU_UCODE_ID_CP_PFP:
- amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
+ hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
break;
case AMDGPU_UCODE_ID_CP_ME:
- amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
+ hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
break;
case AMDGPU_UCODE_ID_CP_MEC1:
- amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
+ hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
break;
case AMDGPU_UCODE_ID_RLC_G:
- amdgpu_ucode_print_rlc_hdr(&rlc_hdr->header);
+ hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
+ amdgpu_ucode_print_rlc_hdr(hdr);
break;
case AMDGPU_UCODE_ID_SMC:
- amdgpu_ucode_print_smc_hdr(&smc_hdr->header);
+ hdr = (struct common_firmware_header *)adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(hdr);
break;
default:
break;
@@ -1079,10 +1411,6 @@ out:
ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
/* skip mec JT when autoload is enabled */
continue;
- /* Renoir only needs to load mec jump table one time */
- if (adev->asic_type == CHIP_RENOIR &&
- ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)
- continue;
psp_print_fw_hdr(psp, ucode);
@@ -1091,7 +1419,8 @@ out:
return ret;
 /* Start rlc autoload after psp has received all the gfx firmware */
- if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
+ if (psp->autoload_supported && ucode->ucode_id ==
+ AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
ret = psp_rlc_autoload(psp);
if (ret) {
DRM_ERROR("Failed to start rlc autoload\n");
@@ -1216,8 +1545,11 @@ static int psp_hw_fini(void *handle)
psp->xgmi_context.initialized == 1)
psp_xgmi_terminate(psp);
- if (psp->adev->psp.ta_fw)
+ if (psp->adev->psp.ta_fw) {
psp_ras_terminate(psp);
+ psp_dtm_terminate(psp);
+ psp_hdcp_terminate(psp);
+ }
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
@@ -1259,6 +1591,16 @@ static int psp_suspend(void *handle)
DRM_ERROR("Failed to terminate ras ta\n");
return ret;
}
+ ret = psp_hdcp_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate hdcp ta\n");
+ return ret;
+ }
+ ret = psp_dtm_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate dtm ta\n");
+ return ret;
+ }
}
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
@@ -1278,6 +1620,12 @@ static int psp_resume(void *handle)
DRM_INFO("PSP is resuming...\n");
+ ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
+ if (ret) {
+ DRM_ERROR("Failed to process memory training!\n");
+ return ret;
+ }
+
mutex_lock(&adev->firmware.mutex);
ret = psp_hw_start(psp);
@@ -1317,9 +1665,6 @@ int psp_rlc_autoload_start(struct psp_context *psp)
int ret;
struct psp_gfx_cmd_resp *cmd;
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
-
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index bc0947f6bc8a..09c5474ebcc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -37,6 +37,9 @@
#define PSP_RAS_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000
#define PSP_TMR_SIZE 0x400000
+#define PSP_HDCP_SHARED_MEM_SIZE 0x4000
+#define PSP_DTM_SHARED_MEM_SIZE 0x4000
+#define PSP_SHARED_MEM_SIZE 0x4000
struct psp_context;
struct psp_xgmi_node_info;
@@ -46,6 +49,8 @@ enum psp_bootloader_cmd {
PSP_BL__LOAD_SYSDRV = 0x10000,
PSP_BL__LOAD_SOSDRV = 0x20000,
PSP_BL__LOAD_KEY_DATABASE = 0x80000,
+ PSP_BL__DRAM_LONG_TRAIN = 0x100000,
+ PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
};
enum psp_ring_type
@@ -108,6 +113,9 @@ struct psp_funcs
struct ta_ras_trigger_error_input *info);
int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr);
int (*rlc_autoload_start)(struct psp_context *psp);
+ int (*mem_training_init)(struct psp_context *psp);
+ void (*mem_training_fini)(struct psp_context *psp);
+ int (*mem_training)(struct psp_context *psp, uint32_t ops);
};
#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64
@@ -142,6 +150,65 @@ struct psp_ras_context {
struct amdgpu_ras *ras;
};
+struct psp_hdcp_context {
+ bool hdcp_initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *hdcp_shared_bo;
+ uint64_t hdcp_shared_mc_addr;
+ void *hdcp_shared_buf;
+};
+
+struct psp_dtm_context {
+ bool dtm_initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *dtm_shared_bo;
+ uint64_t dtm_shared_mc_addr;
+ void *dtm_shared_buf;
+};
+
+#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
+#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000
+#define GDDR6_MEM_TRAINING_OFFSET 0x8000
+
+enum psp_memory_training_init_flag {
+ PSP_MEM_TRAIN_NOT_SUPPORT = 0x0,
+ PSP_MEM_TRAIN_SUPPORT = 0x1,
+ PSP_MEM_TRAIN_INIT_FAILED = 0x2,
+ PSP_MEM_TRAIN_RESERVE_SUCCESS = 0x4,
+ PSP_MEM_TRAIN_INIT_SUCCESS = 0x8,
+};
+
+enum psp_memory_training_ops {
+ PSP_MEM_TRAIN_SEND_LONG_MSG = 0x1,
+ PSP_MEM_TRAIN_SAVE = 0x2,
+ PSP_MEM_TRAIN_RESTORE = 0x4,
+ PSP_MEM_TRAIN_SEND_SHORT_MSG = 0x8,
+ PSP_MEM_TRAIN_COLD_BOOT = PSP_MEM_TRAIN_SEND_LONG_MSG,
+ PSP_MEM_TRAIN_RESUME = PSP_MEM_TRAIN_SEND_SHORT_MSG,
+};
+
+struct psp_memory_training_context {
+ /* training data size */
+ u64 train_data_size;
+ /*
+ * sys_cache
+ * CPU virtual address of the system memory buffer that is used to
+ * store the training data.
+ */
+ void *sys_cache;
+
+ /* vram offset of the p2c training data */
+ u64 p2c_train_data_offset;
+ struct amdgpu_bo *p2c_bo;
+
+ /* vram offset of the c2p training data */
+ u64 c2p_train_data_offset;
+ struct amdgpu_bo *c2p_bo;
+
+ enum psp_memory_training_init_flag init;
+ u32 training_cnt;
+};
+
struct psp_context
{
struct amdgpu_device *adev;
@@ -206,9 +273,21 @@ struct psp_context
uint32_t ta_ras_ucode_version;
uint32_t ta_ras_ucode_size;
uint8_t *ta_ras_start_addr;
+
+ uint32_t ta_hdcp_ucode_version;
+ uint32_t ta_hdcp_ucode_size;
+ uint8_t *ta_hdcp_start_addr;
+
+ uint32_t ta_dtm_ucode_version;
+ uint32_t ta_dtm_ucode_size;
+ uint8_t *ta_dtm_start_addr;
+
struct psp_xgmi_context xgmi_context;
struct psp_ras_context ras;
+ struct psp_hdcp_context hdcp_context;
+ struct psp_dtm_context dtm_context;
struct mutex mutex;
+ struct psp_memory_training_context mem_train_ctx;
};
struct amdgpu_psp_funcs {
@@ -251,6 +330,12 @@ struct amdgpu_psp_funcs {
(psp)->funcs->xgmi_set_topology_info((psp), (num_device), (topology)) : -EINVAL)
#define psp_rlc_autoload(psp) \
((psp)->funcs->rlc_autoload_start ? (psp)->funcs->rlc_autoload_start((psp)) : 0)
+#define psp_mem_training_init(psp) \
+ ((psp)->funcs->mem_training_init ? (psp)->funcs->mem_training_init((psp)) : 0)
+#define psp_mem_training_fini(psp) \
+ ((psp)->funcs->mem_training_fini ? (psp)->funcs->mem_training_fini((psp)) : 0)
+#define psp_mem_training(psp, ops) \
+ ((psp)->funcs->mem_training ? (psp)->funcs->mem_training((psp), (ops)) : 0)
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
@@ -279,6 +364,8 @@ int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_ras_enable_features(struct psp_context *psp,
union ta_ras_cmd_input *info, bool enable);
+int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_rlc_autoload_start(struct psp_context *psp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 016ea274b955..6220394521e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -25,10 +25,13 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
+#include <linux/reboot.h>
+#include <linux/syscalls.h>
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
const char *ras_error_string[] = {
"none",
@@ -65,11 +68,11 @@ const char *ras_block_string[] = {
/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)
-static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
- uint64_t offset, uint64_t size,
- struct amdgpu_bo **bo_ptr);
-static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
- struct amdgpu_bo **bo_ptr);
+
+atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
+
+static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+ uint64_t addr);
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
@@ -150,6 +153,8 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
op = 1;
else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
op = 2;
+ else if (sscanf(str, "reboot %32s", block_name) == 1)
+ op = 3;
else if (str[0] && str[1] && str[2] && str[3])
/* ascii string, but commands are not matched. */
return -EINVAL;
@@ -189,6 +194,10 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
return 0;
}
+
+static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
+ struct ras_common_if *head);
+
/**
* DOC: AMDGPU RAS debugfs control interface
*
@@ -209,30 +218,38 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
* value to the address.
*
* Second member: struct ras_debug_if::op.
- * It has three kinds of operations.
- * 0: disable RAS on the block. Take ::head as its data.
- * 1: enable RAS on the block. Take ::head as its data.
- * 2: inject errors on the block. Take ::inject as its data.
+ * It has four kinds of operations.
+ *
+ * - 0: disable RAS on the block. Take ::head as its data.
+ * - 1: enable RAS on the block. Take ::head as its data.
+ * - 2: inject errors on the block. Take ::inject as its data.
+ * - 3: reboot on unrecoverable error
*
* How to use the interface?
* programs:
 * copy the struct ras_debug_if in your code and initialize it.
 * write the struct to the control node, as sketched below.
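+ *
+ * A minimal C sketch (hypothetical fd opened on .../ras/ras_ctrl; the
+ * layout is the in-kernel struct ras_debug_if):
+ *
+ * .. code-block:: c
+ *
+ * struct ras_debug_if data = { 0 };
+ *
+ * data.head.block = AMDGPU_RAS_BLOCK__UMC;
+ * data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ * data.op = 1; /* 0: disable, 1: enable, 2: inject, 3: reboot */
+ * write(fd, &data, sizeof(data));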
*
- * bash:
- * echo op block [error [sub_blcok address value]] > .../ras/ras_ctrl
- * op: disable, enable, inject
- * disable: only block is needed
- * enable: block and error are needed
- * inject: error, address, value are needed
- * block: umc, smda, gfx, .........
- * see ras_block_string[] for details
- * error: ue, ce
- * ue: multi_uncorrectable
- * ce: single_correctable
- * sub_block: sub block index, pass 0 if there is no sub block
+ * .. code-block:: bash
+ *
+ * echo op block [error [sub_block address value]] > .../ras/ras_ctrl
+ *
+ * op: disable, enable, inject
+ * disable: only block is needed
+ * enable: block and error are needed
+ * inject: error, address, value are needed
+ * block: umc, sdma, gfx, .........
+ * see ras_block_string[] for details
+ * error: ue, ce
+ * ue: multi_uncorrectable
+ * ce: single_correctable
+ * sub_block:
+ * sub block index, pass 0 if there is no sub block
+ *
+ * here are some examples for bash commands:
+ *
+ * .. code-block:: bash
*
- * here are some examples for bash commands,
* echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
* echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
* echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
@@ -245,8 +262,9 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
* For inject, please check corresponding err count at
* /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
*
- * NOTE: operation is only allowed on blocks which are supported.
- * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask
+ * .. note::
+ * Operation is only allowed on blocks which are supported.
+ * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask
*/
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
@@ -276,9 +294,20 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
break;
}
+ /* umc ce/ue error injection for a bad page is not allowed */
+ if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
+ amdgpu_ras_check_bad_page(adev, data.inject.address)) {
+ DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n",
+ data.inject.address);
+ break;
+ }
+
 /* data.inject.address is an offset instead of an absolute gpu address */
ret = amdgpu_ras_error_inject(adev, &data.inject);
break;
+ case 3:
+ amdgpu_ras_get_context(adev)->reboot = true;
+ break;
default:
ret = -EINVAL;
break;
@@ -290,6 +319,33 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
return size;
}
+/**
+ * DOC: AMDGPU RAS debugfs EEPROM table reset interface
+ *
+ * Some boards contain an EEPROM which is used to persistently store a list of
+ * bad pages containing ECC errors detected in vram. This interface provides
+ * a way to reset the EEPROM, e.g., after testing error injection.
+ *
+ * Usage:
+ *
+ * .. code-block:: bash
+ *
+ * echo 1 > ../ras/ras_eeprom_reset
+ *
+ * will reset EEPROM table to 0 entries.
+ *
+ */
+static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ int ret;
+
+ ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control);
+
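+ /* amdgpu_ras_eeprom_reset_table() returns 1 on success */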
+ return ret == 1 ? size : -EIO;
+}
+
static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
.owner = THIS_MODULE,
.read = NULL,
@@ -297,6 +353,34 @@ static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
+ .owner = THIS_MODULE,
+ .read = NULL,
+ .write = amdgpu_ras_debugfs_eeprom_write,
+ .llseek = default_llseek
+};
+
+/**
+ * DOC: AMDGPU RAS sysfs Error Count Interface
+ *
+ * It allows the user to read the error count for each IP block on the gpu through
+ * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
+ *
+ * It outputs multiple lines which report the uncorrected (ue) and corrected
+ * (ce) error counts.
+ *
+ * The format of one line is below,
+ *
+ * [ce|ue]: count
+ *
+ * Example:
+ *
+ * .. code-block:: bash
+ *
+ * cat /sys/class/drm/card0/device/ras/gfx_err_count
+ * ue: 0
+ * ce: 1
+ *
+ */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -615,8 +699,12 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
adev->gfx.funcs->query_ras_error_count(adev, &err_data);
break;
case AMDGPU_RAS_BLOCK__MMHUB:
- if (adev->mmhub_funcs->query_ras_error_count)
- adev->mmhub_funcs->query_ras_error_count(adev, &err_data);
+ if (adev->mmhub.funcs->query_ras_error_count)
+ adev->mmhub.funcs->query_ras_error_count(adev, &err_data);
+ break;
+ case AMDGPU_RAS_BLOCK__PCIE_BIF:
+ if (adev->nbio.funcs->query_ras_error_count)
+ adev->nbio.funcs->query_ras_error_count(adev, &err_data);
break;
default:
break;
@@ -628,12 +716,14 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
info->ue_count = obj->err_data.ue_count;
info->ce_count = obj->err_data.ce_count;
- if (err_data.ce_count)
+ if (err_data.ce_count) {
dev_info(adev->dev, "%ld correctable errors detected in %s block\n",
obj->err_data.ce_count, ras_block_str(info->head.block));
- if (err_data.ue_count)
+ }
+ if (err_data.ue_count) {
dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n",
obj->err_data.ue_count, ras_block_str(info->head.block));
+ }
return 0;
}
@@ -664,6 +754,8 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
break;
case AMDGPU_RAS_BLOCK__UMC:
case AMDGPU_RAS_BLOCK__MMHUB:
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ case AMDGPU_RAS_BLOCK__PCIE_BIF:
ret = psp_ras_trigger_error(&adev->psp, &block_info);
break;
default:
@@ -733,8 +825,8 @@ static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
};
}
-/*
- * DOC: ras sysfs gpu_vram_bad_pages interface
+/**
+ * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
*
 * It allows the user to read the bad pages of vram on the gpu through
* /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
@@ -746,14 +838,21 @@ static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
*
* gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below:
+ *
+ * R: reserved, this gpu page is reserved and not available for use.
+ *
* P: pending for reserve, this gpu page is marked as bad, will be reserved
- * in next window of page_reserve.
+ * in the next window of page_reserve.
+ *
 * F: unable to reserve. this gpu page can't be reserved for some reason.
*
- * examples:
- * 0x00000001 : 0x00001000 : R
- * 0x00000002 : 0x00001000 : P
+ * Examples:
+ *
+ * .. code-block:: bash
+ *
+ * 0x00000001 : 0x00001000 : R
+ * 0x00000002 : 0x00001000 : P
+ *
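+ * The list can be dumped with a plain read, e.g. for card 0 (illustrative):
+ *
+ * .. code-block:: bash
+ *
+ *	cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages
+ *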
*/
static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
@@ -934,8 +1033,10 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
struct drm_minor *minor = adev->ddev->primary;
con->dir = debugfs_create_dir("ras", minor->debugfs_root);
- con->ent = debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
- adev, &amdgpu_ras_debugfs_ctrl_ops);
+ debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
+ adev, &amdgpu_ras_debugfs_ctrl_ops);
+ debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir,
+ adev, &amdgpu_ras_debugfs_eeprom_ops);
}
void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
@@ -980,10 +1081,8 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
amdgpu_ras_debugfs_remove(adev, &obj->head);
}
- debugfs_remove(con->ent);
- debugfs_remove(con->dir);
+ debugfs_remove_recursive(con->dir);
con->dir = NULL;
- con->ent = NULL;
}
/* debugfs end */
@@ -1188,14 +1287,14 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
for (; i < data->count; i++) {
(*bps)[i] = (struct ras_badpage){
- .bp = data->bps[i].bp,
+ .bp = data->bps[i].retired_page,
.size = AMDGPU_GPU_PAGE_SIZE,
.flags = 0,
};
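+ /* flags legend (see the gpu_vram_bad_pages DOC above):
+ * 0 = R (reserved), 1 = P (pending), 2 = F (failed to reserve)
+ */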
if (data->last_reserved <= i)
(*bps)[i].flags = 1;
- else if (data->bps[i].bo == NULL)
+ else if (data->bps_bo[i] == NULL)
(*bps)[i].flags = 2;
}
@@ -1214,105 +1313,46 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
atomic_set(&ras->in_recovery, 0);
}
-static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
- struct amdgpu_bo **bo_ptr)
-{
- /* no need to free it actually. */
- amdgpu_bo_free_kernel(bo_ptr, NULL, NULL);
- return 0;
-}
-
-/* reserve vram with size@offset */
-static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
- uint64_t offset, uint64_t size,
- struct amdgpu_bo **bo_ptr)
-{
- struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_bo_param bp;
- int r = 0;
- int i;
- struct amdgpu_bo *bo;
-
- if (bo_ptr)
- *bo_ptr = NULL;
- memset(&bp, 0, sizeof(bp));
- bp.size = size;
- bp.byte_align = PAGE_SIZE;
- bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
- bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
- bp.type = ttm_bo_type_kernel;
- bp.resv = NULL;
-
- r = amdgpu_bo_create(adev, &bp, &bo);
- if (r)
- return -EINVAL;
-
- r = amdgpu_bo_reserve(bo, false);
- if (r)
- goto error_reserve;
-
- offset = ALIGN(offset, PAGE_SIZE);
- for (i = 0; i < bo->placement.num_placement; ++i) {
- bo->placements[i].fpfn = offset >> PAGE_SHIFT;
- bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
- }
-
- ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
- r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem, &ctx);
- if (r)
- goto error_pin;
-
- r = amdgpu_bo_pin_restricted(bo,
- AMDGPU_GEM_DOMAIN_VRAM,
- offset,
- offset + size);
- if (r)
- goto error_pin;
-
- if (bo_ptr)
- *bo_ptr = bo;
-
- amdgpu_bo_unreserve(bo);
- return r;
-
-error_pin:
- amdgpu_bo_unreserve(bo);
-error_reserve:
- amdgpu_bo_unref(&bo);
- return r;
-}
-
/* alloc/realloc bps array */
static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
struct ras_err_handler_data *data, int pages)
{
unsigned int old_space = data->count + data->space_left;
unsigned int new_space = old_space + pages;
- unsigned int align_space = ALIGN(new_space, 1024);
- void *tmp = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
-
- if (!tmp)
+ unsigned int align_space = ALIGN(new_space, 512);
+ void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
+ struct amdgpu_bo **bps_bo =
+ kmalloc(align_space * sizeof(*data->bps_bo), GFP_KERNEL);
+
+ if (!bps || !bps_bo) {
+ kfree(bps);
+ kfree(bps_bo);
return -ENOMEM;
+ }
if (data->bps) {
- memcpy(tmp, data->bps,
+ memcpy(bps, data->bps,
data->count * sizeof(*data->bps));
kfree(data->bps);
}
+ if (data->bps_bo) {
+ memcpy(bps_bo, data->bps_bo,
+ data->count * sizeof(*data->bps_bo));
+ kfree(data->bps_bo);
+ }
- data->bps = tmp;
+ data->bps = bps;
+ data->bps_bo = bps_bo;
data->space_left += align_space - old_space;
return 0;
}
/* it deal with vram only. */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
- unsigned long *bps, int pages)
+ struct eeprom_table_record *bps, int pages)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
- int i = pages;
int ret = 0;
if (!con || !con->eh_data || !bps || pages <= 0)
@@ -1329,24 +1369,120 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
goto out;
}
- while (i--)
- data->bps[data->count++].bp = bps[i];
-
+ memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
+ data->count += pages;
data->space_left -= pages;
+
out:
mutex_unlock(&con->recovery_lock);
return ret;
}
+/*
+ * write the error record array to eeprom; the caller should hold
+ * recovery_lock
+ */
+static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ struct amdgpu_ras_eeprom_control *control;
+ int save_count;
+
+ if (!con || !con->eh_data)
+ return 0;
+
+ control = &con->eeprom_control;
+ data = con->eh_data;
+ save_count = data->count - control->num_recs;
+ /* only new entries are saved */
+ if (save_count > 0)
+ if (amdgpu_ras_eeprom_process_recods(control,
+ &data->bps[control->num_recs],
+ true,
+ save_count)) {
+ DRM_ERROR("Failed to save EEPROM table data!");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * read the error record array from eeprom and reserve enough space for
+ * storing new bad pages
+ */
+static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_eeprom_control *control =
+ &adev->psp.ras.ras->eeprom_control;
+ struct eeprom_table_record *bps = NULL;
+ int ret = 0;
+
+ /* no bad page record, skip eeprom access */
+ if (!control->num_recs)
+ return ret;
+
+ bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL);
+ if (!bps)
+ return -ENOMEM;
+
+ if (amdgpu_ras_eeprom_process_recods(control, bps, false,
+ control->num_recs)) {
+ DRM_ERROR("Failed to load EEPROM table records!");
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs);
+
+out:
+ kfree(bps);
+ return ret;
+}
+
+/*
+ * check if an address belongs to a bad page
+ *
+ * Note: this check is only for the umc block
+ */
+static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+ uint64_t addr)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ int i;
+ bool ret = false;
+
+ if (!con || !con->eh_data)
+ return ret;
+
+ mutex_lock(&con->recovery_lock);
+ data = con->eh_data;
+ if (!data)
+ goto out;
+
+ addr >>= AMDGPU_GPU_PAGE_SHIFT;
+ for (i = 0; i < data->count; i++)
+ if (addr == data->bps[i].retired_page) {
+ ret = true;
+ goto out;
+ }
+
+out:
+ mutex_unlock(&con->recovery_lock);
+ return ret;
+}
+
/* called in gpu recovery/init */
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
uint64_t bp;
- struct amdgpu_bo *bo;
- int i;
+ struct amdgpu_bo *bo = NULL;
+ int i, ret = 0;
if (!con || !con->eh_data)
return 0;
@@ -1357,18 +1493,29 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
goto out;
/* reserve vram at driver post stage. */
for (i = data->last_reserved; i < data->count; i++) {
- bp = data->bps[i].bp;
+ bp = data->bps[i].retired_page;
- if (amdgpu_ras_reserve_vram(adev, bp << PAGE_SHIFT,
- PAGE_SIZE, &bo))
- DRM_ERROR("RAS ERROR: reserve vram %llx fail\n", bp);
+ /* There are two cases of reserve errors that should be ignored:
+ * 1) a ras bad page has been allocated (used by someone);
+ * 2) a ras bad page has been reserved (duplicate error injection
+ * for one page);
+ */
+ if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
+ AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL))
+ DRM_WARN("RAS WARN: reserve vram for retired page %llx fail\n", bp);
- data->bps[i].bo = bo;
+ data->bps_bo[i] = bo;
data->last_reserved = i + 1;
+ bo = NULL;
}
+
+ /* continue to save bad pages to eeprom even if reserve_vram fails */
+ ret = amdgpu_ras_save_bad_pages(adev);
out:
mutex_unlock(&con->recovery_lock);
- return 0;
+ return ret;
}
/* called when driver unload */
@@ -1388,11 +1535,11 @@ static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev)
goto out;
for (i = data->last_reserved - 1; i >= 0; i--) {
- bo = data->bps[i].bo;
+ bo = data->bps_bo[i];
- amdgpu_ras_release_vram(adev, &bo);
+ amdgpu_bo_free_kernel(&bo, NULL, NULL);
- data->bps[i].bo = bo;
+ data->bps_bo[i] = bo;
data->last_reserved = i;
}
out:
@@ -1400,41 +1547,54 @@ out:
return 0;
}
-static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
-{
- /* TODO
- * write the array to eeprom when SMU disabled.
- */
- return 0;
-}
-
-static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
-{
- /* TODO
- * read the array to eeprom when SMU disabled.
- */
- return 0;
-}
-
-static int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
+int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- struct ras_err_handler_data **data = &con->eh_data;
+ struct ras_err_handler_data **data;
+ int ret;
- *data = kmalloc(sizeof(**data),
- GFP_KERNEL|__GFP_ZERO);
- if (!*data)
- return -ENOMEM;
+ if (con)
+ data = &con->eh_data;
+ else
+ return 0;
+
+ *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
+ if (!*data) {
+ ret = -ENOMEM;
+ goto out;
+ }
mutex_init(&con->recovery_lock);
INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
atomic_set(&con->in_recovery, 0);
con->adev = adev;
- amdgpu_ras_load_bad_pages(adev);
- amdgpu_ras_reserve_bad_pages(adev);
+ ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
+ if (ret)
+ goto free;
+
+ if (con->eeprom_control.num_recs) {
+ ret = amdgpu_ras_load_bad_pages(adev);
+ if (ret)
+ goto free;
+ ret = amdgpu_ras_reserve_bad_pages(adev);
+ if (ret)
+ goto release;
+ }
return 0;
+
+release:
+ amdgpu_ras_release_bad_pages(adev);
+free:
+ kfree((*data)->bps);
+ kfree((*data)->bps_bo);
+ kfree(*data);
+ con->eh_data = NULL;
+out:
+ DRM_WARN("Failed to initialize ras recovery!\n");
+
+ return ret;
}
static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
@@ -1442,13 +1602,17 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data = con->eh_data;
+ /* recovery_init failed to init it, fini is useless */
+ if (!data)
+ return 0;
+
cancel_work_sync(&con->recovery_work);
- amdgpu_ras_save_bad_pages(adev);
amdgpu_ras_release_bad_pages(adev);
mutex_lock(&con->recovery_lock);
con->eh_data = NULL;
kfree(data->bps);
+ kfree(data->bps_bo);
kfree(data);
mutex_unlock(&con->recovery_lock);
@@ -1500,6 +1664,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
int amdgpu_ras_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int r;
if (con)
return 0;
@@ -1527,31 +1692,106 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
/* Might need get this flag from vbios. */
con->flags = RAS_DEFAULT_FLAGS;
- if (amdgpu_ras_recovery_init(adev))
- goto recovery_out;
+ if (adev->nbio.funcs->init_ras_controller_interrupt) {
+ r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
+ if (r)
+ return r;
+ }
+
+ if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
+ r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);
+ if (r)
+ return r;
+ }
amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;
if (amdgpu_ras_fs_init(adev))
goto fs_out;
- /* ras init for each ras block */
- if (adev->umc.funcs->ras_init)
- adev->umc.funcs->ras_init(adev);
-
DRM_INFO("RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n",
con->hw_supported, con->supported);
return 0;
fs_out:
- amdgpu_ras_recovery_fini(adev);
-recovery_out:
amdgpu_ras_set_context(adev, NULL);
kfree(con);
return -EINVAL;
}
+/* helper function to handle common stuff in ip late init phase */
+int amdgpu_ras_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_fs_if *fs_info,
+ struct ras_ih_if *ih_info)
+{
+ int r;
+
+ /* disable RAS feature per IP block if it is not supported */
+ if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
+ amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
+ return 0;
+ }
+
+ r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
+ if (r) {
+ if (r == -EAGAIN) {
+ /* request gpu reset. will run again */
+ amdgpu_ras_request_reset_on_boot(adev,
+ ras_block->block);
+ return 0;
+ } else if (adev->in_suspend || adev->in_gpu_reset) {
+ /* in resume phase, if we fail to enable ras,
+ * clean up all ras fs nodes and disable ras */
+ goto cleanup;
+ } else
+ return r;
+ }
+
+ /* in resume phase, no need to create ras fs node */
+ if (adev->in_suspend || adev->in_gpu_reset)
+ return 0;
+
+ if (ih_info->cb) {
+ r = amdgpu_ras_interrupt_add_handler(adev, ih_info);
+ if (r)
+ goto interrupt;
+ }
+
+ amdgpu_ras_debugfs_create(adev, fs_info);
+
+ r = amdgpu_ras_sysfs_create(adev, fs_info);
+ if (r)
+ goto sysfs;
+
+ return 0;
+cleanup:
+ amdgpu_ras_sysfs_remove(adev, ras_block);
+sysfs:
+ amdgpu_ras_debugfs_remove(adev, ras_block);
+ if (ih_info->cb)
+ amdgpu_ras_interrupt_remove_handler(adev, ih_info);
+interrupt:
+ amdgpu_ras_feature_enable(adev, ras_block, 0);
+ return r;
+}
+
+/* helper function to remove ras fs node and interrupt handler */
+void amdgpu_ras_late_fini(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_ih_if *ih_info)
+{
+ if (!ras_block || !ih_info)
+ return;
+
+ amdgpu_ras_sysfs_remove(adev, ras_block);
+ amdgpu_ras_debugfs_remove(adev, ras_block);
+ if (ih_info->cb)
+ amdgpu_ras_interrupt_remove_handler(adev, ih_info);
+ amdgpu_ras_feature_enable(adev, ras_block, 0);
+}
+
/* do some init work after IP late init as dependence.
* and it runs in resume/gpu reset/booting up cases.
*/
@@ -1645,3 +1885,18 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
return 0;
}
+
+void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
+{
+ uint32_t hw_supported, supported;
+
+ amdgpu_ras_check_supported(adev, &hw_supported, &supported);
+ if (!hw_supported)
+ return;
+
+ if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
+ DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");
+
+ amdgpu_ras_reset_gpu(adev, false);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 6c76bb2a6843..f80fd3428c98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -317,8 +317,6 @@ struct amdgpu_ras {
struct list_head head;
/* debugfs */
struct dentry *dir;
- /* debugfs ctrl */
- struct dentry *ent;
/* sysfs */
struct device_attribute features_attr;
struct bin_attribute badpages_attr;
@@ -334,7 +332,7 @@ struct amdgpu_ras {
struct mutex recovery_lock;
uint32_t flags;
-
+ bool reboot;
struct amdgpu_ras_eeprom_control eeprom_control;
};
@@ -347,15 +345,14 @@ struct ras_err_data {
unsigned long ue_count;
unsigned long ce_count;
unsigned long err_addr_cnt;
- uint64_t *err_addr;
+ struct eeprom_table_record *err_addr;
};
struct ras_err_handler_data {
- /* point to bad pages array */
- struct {
- unsigned long bp;
- struct amdgpu_bo *bo;
- } *bps;
+ /* point to bad page records array */
+ struct eeprom_table_record *bps;
+ /* point to reserved bo array */
+ struct amdgpu_bo **bps_bo;
/* the count of entries */
int count;
/* the space can place new entries */
@@ -365,7 +362,7 @@ struct ras_err_handler_data {
};
typedef int (*ras_ih_cb)(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
+ void *err_data,
struct amdgpu_iv_entry *entry);
struct ras_ih_data {
@@ -481,6 +478,7 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
return ras && (ras->supported & (1 << block));
}
+int amdgpu_ras_recovery_init(struct amdgpu_device *adev);
int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
unsigned int block);
@@ -492,7 +490,7 @@ unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
- unsigned long *bps, int pages);
+ struct eeprom_table_record *bps, int pages);
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev);
@@ -501,6 +499,12 @@ static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev,
{
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ /* save bad page to eeprom before gpu reset,
+ * i2c may be unstable in gpu reset
+ */
+ if (in_task())
+ amdgpu_ras_reserve_bad_pages(adev);
+
if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
schedule_work(&ras->recovery_work);
return 0;
@@ -566,6 +570,13 @@ amdgpu_ras_error_to_ta(enum amdgpu_ras_error_type error) {
int amdgpu_ras_init(struct amdgpu_device *adev);
int amdgpu_ras_fini(struct amdgpu_device *adev);
int amdgpu_ras_pre_fini(struct amdgpu_device *adev);
+int amdgpu_ras_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_fs_if *fs_info,
+ struct ras_ih_if *ih_info);
+void amdgpu_ras_late_fini(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_ih_if *ih_info);
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
struct ras_common_if *head, bool enable);
@@ -599,4 +610,14 @@ int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
struct ras_dispatch_if *info);
+
+extern atomic_t amdgpu_ras_in_intr;
+
+static inline bool amdgpu_ras_intr_triggered(void)
+{
+ return !!atomic_read(&amdgpu_ras_in_intr);
+}
+
+void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 8a32b5c93778..20af0a17d00b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -100,7 +100,101 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
return ret;
}
-static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control);
+
+
+static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control)
+{
+ int i;
+ uint32_t tbl_sum = 0;
+
+ /* Header checksum, skip checksum field in the calculation */
+ for (i = 0; i < sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); i++)
+ tbl_sum += *(((unsigned char *)&control->tbl_hdr) + i);
+
+ return tbl_sum;
+}
+
+static uint32_t __calc_recs_byte_sum(struct eeprom_table_record *records,
+ int num)
+{
+ int i, j;
+ uint32_t tbl_sum = 0;
+
+ /* Records checksum */
+ for (i = 0; i < num; i++) {
+ struct eeprom_table_record *record = &records[i];
+
+ for (j = 0; j < sizeof(*record); j++) {
+ tbl_sum += *(((unsigned char *)record) + j);
+ }
+ }
+
+ return tbl_sum;
+}
+
+static inline uint32_t __calc_tbl_byte_sum(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *records, int num)
+{
+ return __calc_hdr_byte_sum(control) + __calc_recs_byte_sum(records, num);
+}
+
+/* Checksum = 256 - ((sum of all table entries) mod 256) */
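+/* e.g. a table byte sum of 4660: 4660 mod 256 = 52, so checksum = 256 - 52 = 204 */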
+static void __update_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *records, int num,
+ uint32_t old_hdr_byte_sum)
+{
+ /*
+ * This will update the table sum with new records.
+ *
+ * TODO: What happens when the EEPROM table wraps around and old
+ * records at the start get overwritten?
+ */
+
+ /* need to recalculate updated header byte sum */
+ control->tbl_byte_sum -= old_hdr_byte_sum;
+ control->tbl_byte_sum += __calc_tbl_byte_sum(control, records, num);
+
+ control->tbl_hdr.checksum = 256 - (control->tbl_byte_sum % 256);
+}
+
+/* table sum mod 256 + checksum must equal 256 */
+static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *records, int num)
+{
+ control->tbl_byte_sum = __calc_tbl_byte_sum(control, records, num);
+
+ if (control->tbl_hdr.checksum + (control->tbl_byte_sum % 256) != 256) {
+ DRM_WARN("Checksum mismatch, checksum: %u ", control->tbl_hdr.checksum);
+ return false;
+ }
+
+ return true;
+}
+
+int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
+{
+ unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ int ret = 0;
+
+ mutex_lock(&control->tbl_mutex);
+
+ hdr->header = EEPROM_TABLE_HDR_VAL;
+ hdr->version = EEPROM_TABLE_VER;
+ hdr->first_rec_offset = EEPROM_RECORD_START;
+ hdr->tbl_size = EEPROM_TABLE_HEADER_SIZE;
+
+ control->tbl_byte_sum = 0;
+ __update_tbl_checksum(control, NULL, 0, 0);
+ control->next_addr = EEPROM_RECORD_START;
+
+ ret = __update_table_header(control, buff);
+
+ mutex_unlock(&control->tbl_mutex);
+
+ return ret;
+
+}
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
{
@@ -143,25 +237,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
if (hdr->header == EEPROM_TABLE_HDR_VAL) {
control->num_recs = (hdr->tbl_size - EEPROM_TABLE_HEADER_SIZE) /
EEPROM_TABLE_RECORD_SIZE;
+ control->tbl_byte_sum = __calc_hdr_byte_sum(control);
+ control->next_addr = EEPROM_RECORD_START;
+
DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
control->num_recs);
} else {
DRM_INFO("Creating new EEPROM table");
- hdr->header = EEPROM_TABLE_HDR_VAL;
- hdr->version = EEPROM_TABLE_VER;
- hdr->first_rec_offset = EEPROM_RECORD_START;
- hdr->tbl_size = EEPROM_TABLE_HEADER_SIZE;
-
- adev->psp.ras.ras->eeprom_control.tbl_byte_sum =
- __calc_hdr_byte_sum(&adev->psp.ras.ras->eeprom_control);
- ret = __update_table_header(control, buff);
+ ret = amdgpu_ras_eeprom_reset_table(control);
}
- /* Start inserting records from here */
- adev->psp.ras.ras->eeprom_control.next_addr = EEPROM_RECORD_START;
-
return ret == 1 ? 0 : -EIO;
}
@@ -226,8 +313,8 @@ static void __decode_table_record_from_buff(struct amdgpu_ras_eeprom_control *co
record->offset = (le64_to_cpu(tmp) & 0xffffffffffff);
i += 6;
- buff[i++] = record->mem_channel;
- buff[i++] = record->mcumc_id;
+ record->mem_channel = buff[i++];
+ record->mcumc_id = buff[i++];
memcpy(&tmp, buff + i, 6);
record->retired_page = (le64_to_cpu(tmp) & 0xffffffffffff);
@@ -266,84 +353,15 @@ static uint32_t __correct_eeprom_dest_address(uint32_t curr_address)
return curr_address;
}
-
-static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control)
-{
- int i;
- uint32_t tbl_sum = 0;
-
- /* Header checksum, skip checksum field in the calculation */
- for (i = 0; i < sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); i++)
- tbl_sum += *(((unsigned char *)&control->tbl_hdr) + i);
-
- return tbl_sum;
-}
-
-static uint32_t __calc_recs_byte_sum(struct eeprom_table_record *records,
- int num)
-{
- int i, j;
- uint32_t tbl_sum = 0;
-
- /* Records checksum */
- for (i = 0; i < num; i++) {
- struct eeprom_table_record *record = &records[i];
-
- for (j = 0; j < sizeof(*record); j++) {
- tbl_sum += *(((unsigned char *)record) + j);
- }
- }
-
- return tbl_sum;
-}
-
-static inline uint32_t __calc_tbl_byte_sum(struct amdgpu_ras_eeprom_control *control,
- struct eeprom_table_record *records, int num)
-{
- return __calc_hdr_byte_sum(control) + __calc_recs_byte_sum(records, num);
-}
-
-/* Checksum = 256 -((sum of all table entries) mod 256) */
-static void __update_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
- struct eeprom_table_record *records, int num,
- uint32_t old_hdr_byte_sum)
-{
- /*
- * This will update the table sum with new records.
- *
- * TODO: What happens when the EEPROM table is to be wrapped around
- * and old records from start will get overridden.
- */
-
- /* need to recalculate updated header byte sum */
- control->tbl_byte_sum -= old_hdr_byte_sum;
- control->tbl_byte_sum += __calc_tbl_byte_sum(control, records, num);
-
- control->tbl_hdr.checksum = 256 - (control->tbl_byte_sum % 256);
-}
-
-/* table sum mod 256 + checksum must equals 256 */
-static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
- struct eeprom_table_record *records, int num)
-{
- control->tbl_byte_sum = __calc_tbl_byte_sum(control, records, num);
-
- if (control->tbl_hdr.checksum + (control->tbl_byte_sum % 256) != 256) {
- DRM_WARN("Checksum mismatch, checksum: %u ", control->tbl_hdr.checksum);
- return false;
- }
-
- return true;
-}
-
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records,
bool write,
int num)
{
int i, ret = 0;
- struct i2c_msg *msgs;
- unsigned char *buffs;
+ struct i2c_msg *msgs, *msg;
+ unsigned char *buffs, *buff;
+ struct eeprom_table_record *record;
struct amdgpu_device *adev = to_amdgpu_device(control);
if (adev->asic_type != CHIP_VEGA20)
@@ -373,9 +391,9 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
* 256b
*/
for (i = 0; i < num; i++) {
- unsigned char *buff = &buffs[i * (EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
- struct eeprom_table_record *record = &records[i];
- struct i2c_msg *msg = &msgs[i];
+ buff = &buffs[i * (EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
+ record = &records[i];
+ msg = &msgs[i];
control->next_addr = __correct_eeprom_dest_address(control->next_addr);
@@ -415,8 +433,8 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
if (!write) {
for (i = 0; i < num; i++) {
- unsigned char *buff = &buffs[i*(EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
- struct eeprom_table_record *record = &records[i];
+ buff = &buffs[i*(EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
+ record = &records[i];
__decode_table_record_from_buff(control, record, buff + EEPROM_ADDRESS_SIZE);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index 41f3fcb9a29b..622269957c1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -79,6 +79,7 @@ struct eeprom_table_record {
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control);
void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control);
+int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 5c13c503e61f..6010999d9020 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -23,6 +23,7 @@
#include "amdgpu.h"
#include "amdgpu_sdma.h"
+#include "amdgpu_ras.h"
#define AMDGPU_CSA_SDMA_SIZE 64
/* SDMA CSA reside in the 3rd page of CSA */
@@ -83,3 +84,101 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
return csa_mc_addr;
}
+
+int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
+ void *ras_ih_info)
+{
+ int r, i;
+ struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info;
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "sdma_err_count",
+ .debugfs_name = "sdma_err_inject",
+ };
+
+ if (!ih_info)
+ return -EINVAL;
+
+ if (!adev->sdma.ras_if) {
+ adev->sdma.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->sdma.ras_if)
+ return -ENOMEM;
+ adev->sdma.ras_if->block = AMDGPU_RAS_BLOCK__SDMA;
+ adev->sdma.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->sdma.ras_if->sub_block_index = 0;
+ strcpy(adev->sdma.ras_if->name, "sdma");
+ }
+ fs_info.head = ih_info->head = *adev->sdma.ras_if;
+
+ r = amdgpu_ras_late_init(adev, adev->sdma.ras_if,
+ &fs_info, ih_info);
+ if (r)
+ goto free;
+
+ if (amdgpu_ras_is_supported(adev, adev->sdma.ras_if->block)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ if (r)
+ goto late_fini;
+ }
+ } else {
+ r = 0;
+ goto free;
+ }
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_late_fini(adev, adev->sdma.ras_if, ih_info);
+free:
+ kfree(adev->sdma.ras_if);
+ adev->sdma.ras_if = NULL;
+ return r;
+}
+
+void amdgpu_sdma_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
+ adev->sdma.ras_if) {
+ struct ras_common_if *ras_if = adev->sdma.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ /* the cb member will not be used by
+ * amdgpu_ras_interrupt_remove_handler; init it only
+ * to satisfy the check in ras_late_fini
+ */
+ .cb = amdgpu_sdma_process_ras_data_cb,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
+
+int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry)
+{
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ amdgpu_ras_reset_gpu(adev, 0);
+
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->sdma.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index a9ae0d8a0589..761ff8be6314 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -104,4 +104,13 @@ struct amdgpu_sdma_instance *
amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring);
int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index);
uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, unsigned vmid);
+int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
+ void *ras_ih_info);
+void amdgpu_sdma_ras_fini(struct amdgpu_device *adev);
+int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 77674a7b9616..f940526c5889 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -170,7 +170,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__field(unsigned int, context)
__field(unsigned int, seqno)
__field(struct dma_fence *, fence)
- __field(char *, ring_name)
+ __string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
@@ -179,12 +179,12 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
+ __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
__entry->sched_job_id, __get_str(timeline), __entry->context,
- __entry->seqno, __entry->ring_name, __entry->num_ibs)
+ __entry->seqno, __get_str(ring), __entry->num_ibs)
);
TRACE_EVENT(amdgpu_sched_run_job,
@@ -195,7 +195,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__field(unsigned int, context)
__field(unsigned int, seqno)
- __field(char *, ring_name)
+ __string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
@@ -204,12 +204,12 @@ TRACE_EVENT(amdgpu_sched_run_job,
__assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
+ __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
__entry->sched_job_id, __get_str(timeline), __entry->context,
- __entry->seqno, __entry->ring_name, __entry->num_ibs)
+ __entry->seqno, __get_str(ring), __entry->num_ibs)
);
@@ -323,14 +323,15 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
TRACE_EVENT(amdgpu_vm_set_ptes,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
- uint32_t incr, uint64_t flags),
- TP_ARGS(pe, addr, count, incr, flags),
+ uint32_t incr, uint64_t flags, bool direct),
+ TP_ARGS(pe, addr, count, incr, flags, direct),
TP_STRUCT__entry(
__field(u64, pe)
__field(u64, addr)
__field(u32, count)
__field(u32, incr)
__field(u64, flags)
+ __field(bool, direct)
),
TP_fast_assign(
@@ -339,28 +340,32 @@ TRACE_EVENT(amdgpu_vm_set_ptes,
__entry->count = count;
__entry->incr = incr;
__entry->flags = flags;
+ __entry->direct = direct;
),
- TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u",
- __entry->pe, __entry->addr, __entry->incr,
- __entry->flags, __entry->count)
+ TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u, "
+ "direct=%d", __entry->pe, __entry->addr, __entry->incr,
+ __entry->flags, __entry->count, __entry->direct)
);
TRACE_EVENT(amdgpu_vm_copy_ptes,
- TP_PROTO(uint64_t pe, uint64_t src, unsigned count),
- TP_ARGS(pe, src, count),
+ TP_PROTO(uint64_t pe, uint64_t src, unsigned count, bool direct),
+ TP_ARGS(pe, src, count, direct),
TP_STRUCT__entry(
__field(u64, pe)
__field(u64, src)
__field(u32, count)
+ __field(bool, direct)
),
TP_fast_assign(
__entry->pe = pe;
__entry->src = src;
__entry->count = count;
+ __entry->direct = direct;
),
- TP_printk("pe=%010Lx, src=%010Lx, count=%u",
- __entry->pe, __entry->src, __entry->count)
+ TP_printk("pe=%010Lx, src=%010Lx, count=%u, direct=%d",
+ __entry->pe, __entry->src, __entry->count,
+ __entry->direct)
);
TRACE_EVENT(amdgpu_vm_flush,
@@ -468,7 +473,7 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence),
TP_ARGS(sched_job, fence),
TP_STRUCT__entry(
- __field(const char *,name)
+ __string(ring, sched_job->base.sched->name);
__field(uint64_t, id)
__field(struct dma_fence *, fence)
__field(uint64_t, ctx)
@@ -476,14 +481,14 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
),
TP_fast_assign(
- __entry->name = sched_job->base.sched->name;
+ __assign_str(ring, sched_job->base.sched->name)
__entry->id = sched_job->base.id;
__entry->fence = fence;
__entry->ctx = fence->context;
__entry->seqno = fence->seqno;
),
TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u",
- __entry->name, __entry->id,
+ __get_str(ring), __entry->id,
__entry->fence, __entry->ctx,
__entry->seqno)
);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8e867b8b432f..6d34e766cd54 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -39,6 +39,7 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
+#include <linux/dma-buf.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
@@ -54,6 +55,7 @@
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
+#include "amdgpu_ras.h"
#include "bif/bif_4_1_d.h"
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
@@ -763,6 +765,7 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
*/
struct amdgpu_ttm_tt {
struct ttm_dma_tt ttm;
+ struct drm_gem_object *gobj;
u64 offset;
uint64_t userptr;
struct task_struct *usertask;
@@ -1227,6 +1230,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
return NULL;
}
gtt->ttm.ttm.func = &amdgpu_backend_func;
+ gtt->gobj = &bo->base;
/* allocate space for the uninitialized page entries */
if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
@@ -1247,7 +1251,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
if (gtt && gtt->userptr) {
@@ -1260,7 +1263,19 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
return 0;
}
- if (slave && ttm->sg) {
+ if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
+ if (!ttm->sg) {
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+
+ attach = gtt->gobj->import_attach;
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ ttm->sg = sgt;
+ }
+
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address,
ttm->num_pages);
@@ -1287,9 +1302,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
*/
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ struct amdgpu_device *adev;
if (gtt && gtt->userptr) {
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
@@ -1298,7 +1312,16 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
return;
}
- if (slave)
+ if (ttm->sg && gtt->gobj->import_attach) {
+ struct dma_buf_attachment *attach;
+
+ attach = gtt->gobj->import_attach;
+ dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
+ ttm->sg = NULL;
+ return;
+ }
+
+ if (ttm->page_flags & TTM_PAGE_FLAG_SG)
return;
adev = amdgpu_ttm_adev(ttm->bdev);
@@ -1634,81 +1657,105 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
*/
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
- struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_bo_param bp;
- int r = 0;
- int i;
- u64 vram_size = adev->gmc.visible_vram_size;
- u64 offset = adev->fw_vram_usage.start_offset;
- u64 size = adev->fw_vram_usage.size;
- struct amdgpu_bo *bo;
-
- memset(&bp, 0, sizeof(bp));
- bp.size = adev->fw_vram_usage.size;
- bp.byte_align = PAGE_SIZE;
- bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
- bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- bp.type = ttm_bo_type_kernel;
- bp.resv = NULL;
+ uint64_t vram_size = adev->gmc.visible_vram_size;
+
adev->fw_vram_usage.va = NULL;
adev->fw_vram_usage.reserved_bo = NULL;
- if (adev->fw_vram_usage.size > 0 &&
- adev->fw_vram_usage.size <= vram_size) {
+ if (adev->fw_vram_usage.size == 0 ||
+ adev->fw_vram_usage.size > vram_size)
+ return 0;
- r = amdgpu_bo_create(adev, &bp,
- &adev->fw_vram_usage.reserved_bo);
- if (r)
- goto error_create;
+ return amdgpu_bo_create_kernel_at(adev,
+ adev->fw_vram_usage.start_offset,
+ adev->fw_vram_usage.size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->fw_vram_usage.reserved_bo,
+ &adev->fw_vram_usage.va);
+}
- r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
- if (r)
- goto error_reserve;
+/*
+ * Memory training reservation functions
+ */
- /* remove the original mem node and create a new one at the
- * request position
- */
- bo = adev->fw_vram_usage.reserved_bo;
- offset = ALIGN(offset, PAGE_SIZE);
- for (i = 0; i < bo->placement.num_placement; ++i) {
- bo->placements[i].fpfn = offset >> PAGE_SHIFT;
- bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
- }
+/**
+ * amdgpu_ttm_training_reserve_vram_fini - free vram reserved for memory training
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the vram reserved for memory training, if it has been reserved.
+ */
+static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
+{
+ struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
- ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
- r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
- &bo->tbo.mem, &ctx);
- if (r)
- goto error_pin;
+ ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
+ amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
+ ctx->c2p_bo = NULL;
- r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
- AMDGPU_GEM_DOMAIN_VRAM,
- adev->fw_vram_usage.start_offset,
- (adev->fw_vram_usage.start_offset +
- adev->fw_vram_usage.size));
- if (r)
- goto error_pin;
- r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
- &adev->fw_vram_usage.va);
- if (r)
- goto error_kmap;
+ amdgpu_bo_free_kernel(&ctx->p2c_bo, NULL, NULL);
+ ctx->p2c_bo = NULL;
+
+ return 0;
+}
- amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+/**
+ * amdgpu_ttm_training_reserve_vram_init - create vram reservation bos for memory training
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Create the vram reservation bos used for memory training.
+ */
+static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
+{
+ int ret;
+ struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
+
+ memset(ctx, 0, sizeof(*ctx));
+ if (!adev->fw_vram_usage.mem_train_support) {
+ DRM_DEBUG("memory training does not support!\n");
+ return 0;
}
- return r;
-error_kmap:
- amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
-error_pin:
- amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
-error_reserve:
- amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
-error_create:
- adev->fw_vram_usage.va = NULL;
- adev->fw_vram_usage.reserved_bo = NULL;
- return r;
+ ctx->c2p_train_data_offset = adev->fw_vram_usage.mem_train_fb_loc;
+ ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
+ ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
+
+ DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
+ ctx->train_data_size,
+ ctx->p2c_train_data_offset,
+ ctx->c2p_train_data_offset);
+
+ ret = amdgpu_bo_create_kernel_at(adev,
+ ctx->p2c_train_data_offset,
+ ctx->train_data_size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &ctx->p2c_bo,
+ NULL);
+ if (ret) {
+ DRM_ERROR("alloc p2c_bo failed(%d)!\n", ret);
+ goto Err_out;
+ }
+
+ ret = amdgpu_bo_create_kernel_at(adev,
+ ctx->c2p_train_data_offset,
+ ctx->train_data_size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &ctx->c2p_bo,
+ NULL);
+ if (ret) {
+ DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
+ goto Err_out;
+ }
+
+ ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
+ return 0;
+
+Err_out:
+ amdgpu_ttm_training_reserve_vram_fini(adev);
+ return ret;
}
+
/**
* amdgpu_ttm_init - Init the memory management (ttm) as well as various
* gtt/vram related fields.
@@ -1764,6 +1811,17 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
#endif
/*
+ * retired pages will be loaded from eeprom and reserved here;
+ * this should be called after ttm init since new bos may be created.
+ * recovery_init may fail, but it can free all resources allocated by
+ * itself, and its failure should not stop the amdgpu init process.
+ *
+ * Note: theoretically, this should be called before all vram
+ * allocations to protect retired pages from being abused.
+ */
+ amdgpu_ras_recovery_init(adev);
+
+ /*
*The reserved vram for firmware must be pinned to the specified
*place on the VRAM, so reserve it early.
*/
@@ -1772,6 +1830,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
+ /*
+ *The reserved vram for memory training must be pinned to the specified
+ *place on the VRAM, so reserve it early.
+ */
+ r = amdgpu_ttm_training_reserve_vram_init(adev);
+ if (r)
+ return r;
+
/* allocate memory as required for VGA
* This is used for VGA emulation and pre-OS scanout buffers to
* avoid display artifacts while transitioning between pre-OS
@@ -1782,6 +1848,20 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
NULL, &stolen_vga_buf);
if (r)
return r;
+
+ /*
+ * reserve one TMR (64K) region at the top of VRAM, which holds
+ * IP Discovery data and is protected by PSP.
+ */
+ r = amdgpu_bo_create_kernel_at(adev,
+ adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE,
+ DISCOVERY_TMR_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->discovery_memory,
+ NULL);
+ if (r)
+ return r;
+
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
@@ -1846,6 +1926,9 @@ void amdgpu_ttm_late_init(struct amdgpu_device *adev)
void *stolen_vga_buf;
/* return the VGA stolen memory (if any) back to VRAM */
amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
+
+ /* return the IP Discovery TMR memory back to VRAM */
+ amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
}
/**
@@ -1857,6 +1940,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
return;
amdgpu_ttm_debugfs_fini(adev);
+ amdgpu_ttm_training_reserve_vram_fini(adev);
amdgpu_ttm_fw_reserve_vram_fini(adev);
if (adev->mman.aper_base_kaddr)
iounmap(adev->mman.aper_base_kaddr);
@@ -1953,10 +2037,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
AMDGPU_GPU_PAGE_SIZE;
- num_dw = adev->mman.buffer_funcs->copy_num_dw;
- while (num_dw & 0x7)
- num_dw++;
-
+ num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
num_bytes = num_pages * 8;
r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
@@ -2016,11 +2097,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
- num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
-
- /* for IB padding */
- while (num_dw & 0x7)
- num_dw++;
+ num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 3a6115ad0196..833fc4b68940 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -360,6 +360,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
case CHIP_RAVEN:
case CHIP_VEGA12:
case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
case CHIP_RENOIR:
case CHIP_NAVI10:
case CHIP_NAVI14:
@@ -368,8 +369,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
return AMDGPU_FW_LOAD_DIRECT;
else
return AMDGPU_FW_LOAD_PSP;
- case CHIP_ARCTURUS:
- return AMDGPU_FW_LOAD_DIRECT;
default:
DRM_ERROR("Unknown firmware load type\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index b34f00d42049..410587b950f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -108,6 +108,12 @@ struct ta_firmware_header_v1_0 {
uint32_t ta_ras_ucode_version;
uint32_t ta_ras_offset_bytes;
uint32_t ta_ras_size_bytes;
+ uint32_t ta_hdcp_ucode_version;
+ uint32_t ta_hdcp_offset_bytes;
+ uint32_t ta_hdcp_size_bytes;
+ uint32_t ta_dtm_ucode_version;
+ uint32_t ta_dtm_offset_bytes;
+ uint32_t ta_dtm_size_bytes;
};
/* version_major=1, version_minor=0 */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
new file mode 100644
index 000000000000..d4fb9cf27e21
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_ras.h"
+
+int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "umc_err_count",
+ .debugfs_name = "umc_err_inject",
+ };
+ struct ras_ih_if ih_info = {
+ .cb = amdgpu_umc_process_ras_data_cb,
+ };
+
+ if (!adev->umc.ras_if) {
+ adev->umc.ras_if =
+ kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->umc.ras_if)
+ return -ENOMEM;
+ adev->umc.ras_if->block = AMDGPU_RAS_BLOCK__UMC;
+ adev->umc.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->umc.ras_if->sub_block_index = 0;
+ strcpy(adev->umc.ras_if->name, "umc");
+ }
+ ih_info.head = fs_info.head = *adev->umc.ras_if;
+
+ r = amdgpu_ras_late_init(adev, adev->umc.ras_if,
+ &fs_info, &ih_info);
+ if (r)
+ goto free;
+
+ if (amdgpu_ras_is_supported(adev, adev->umc.ras_if->block)) {
+ r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
+ if (r)
+ goto late_fini;
+ } else {
+ r = 0;
+ goto free;
+ }
+
+ /* ras init of specific umc version */
+ if (adev->umc.funcs && adev->umc.funcs->err_cnt_init)
+ adev->umc.funcs->err_cnt_init(adev);
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_late_fini(adev, adev->umc.ras_if, &ih_info);
+free:
+ kfree(adev->umc.ras_if);
+ adev->umc.ras_if = NULL;
+ return r;
+}
+
+void amdgpu_umc_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
+ adev->umc.ras_if) {
+ struct ras_common_if *ras_if = adev->umc.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ .cb = amdgpu_umc_process_ras_data_cb,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
+
+int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+ void *ras_error_status,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ /* When "Full RAS" is enabled, the per-IP interrupt sources should
+ * be disabled and the driver should only look for the aggregated
+ * interrupt via sync flood
+ */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return AMDGPU_RAS_SUCCESS;
+
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ if (adev->umc.funcs &&
+ adev->umc.funcs->query_ras_error_count)
+ adev->umc.funcs->query_ras_error_count(adev, ras_error_status);
+
+ if (adev->umc.funcs &&
+ adev->umc.funcs->query_ras_error_address &&
+ adev->umc.max_ras_err_cnt_per_query) {
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+ /* still call query_ras_error_address to clear error status
+ * even if a NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ DRM_WARN("Failed to alloc memory for umc error address record!\n");
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ adev->umc.funcs->query_ras_error_address(adev, ras_error_status);
+ }
+
+ /* only uncorrectable error needs gpu reset */
+ if (err_data->ue_count) {
+ if (err_data->err_addr_cnt &&
+ amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
+ err_data->err_addr_cnt))
+ DRM_WARN("Failed to add ras bad page!\n");
+
+ amdgpu_ras_reset_gpu(adev, 0);
+ }
+
+ kfree(err_data->err_addr);
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->umc.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 975afa04df09..3283032a78e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -54,7 +54,8 @@
adev->umc.funcs->disable_umc_index_mode(adev);
struct amdgpu_umc_funcs {
- void (*ras_init)(struct amdgpu_device *adev);
+ void (*err_cnt_init)(struct amdgpu_device *adev);
+ int (*ras_late_init)(struct amdgpu_device *adev);
void (*query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
void (*query_ras_error_address)(struct amdgpu_device *adev,
@@ -62,6 +63,7 @@ struct amdgpu_umc_funcs {
void (*enable_umc_index_mode)(struct amdgpu_device *adev,
uint32_t umc_instance);
void (*disable_umc_index_mode)(struct amdgpu_device *adev);
+ void (*init_registers)(struct amdgpu_device *adev);
};
struct amdgpu_umc {
@@ -75,8 +77,17 @@ struct amdgpu_umc {
uint32_t channel_offs;
/* channel index table of interleaved memory */
const uint32_t *channel_idx_tbl;
+ struct ras_common_if *ras_if;
const struct amdgpu_umc_funcs *funcs;
};
+int amdgpu_umc_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_umc_ras_fini(struct amdgpu_device *adev);
+int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+ void *ras_error_status,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index b70b3c45bb29..703677f2ff6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -80,6 +80,11 @@ MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
+static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence);
+static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ bool direct, struct dma_fence **fence);
/**
* amdgpu_vce_init - allocate memory, load vce firmware
@@ -428,14 +433,15 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
*
* Open up a stream for HW test
*/
-int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence)
+static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -444,7 +450,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
/* stitch together a VCE create msg */
ib->length_dw = 0;
@@ -476,8 +482,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[ib->length_dw++] = 0x00000014; /* len */
ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000001;
for (i = ib->length_dw; i < ib_size_dw; ++i)
@@ -507,8 +513,8 @@ err:
*
* Close up a stream for HW test or if userspace failed to do so
*/
-int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct dma_fence **fence)
+static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ bool direct, struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
@@ -1110,13 +1116,20 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = NULL;
long r;
/* skip vce ring1/2 ib test for now, since it's not reliable */
if (ring != &ring->adev->vce.ring[0])
return 0;
- r = amdgpu_vce_get_create_msg(ring, 1, NULL);
+ r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+ if (r)
+ return r;
+
+ r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
@@ -1132,5 +1145,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
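
The change above replaces the old trick of pointing the feedback buffer past the end of the IB with a real buffer object. The allocate/test/release pattern, sketched with the same helpers the diff uses:

    /* Sketch only: reserved kernel BO backing an IB ring test. */
    static int vce_ib_test_bo_sketch(struct amdgpu_ring *ring)
    {
        struct amdgpu_bo *bo = NULL;
        int r;

        r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &bo, NULL, NULL);
        if (r)
            return r;

        /* ... emit create/destroy messages whose feedback buffer
         * points at amdgpu_bo_gpu_offset(bo) ... */

        amdgpu_bo_unreserve(bo);    /* the BO comes back reserved */
        amdgpu_bo_unref(&bo);
        return r;
    }
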
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 30ea54dd9117..d6d83a3ec803 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -58,10 +58,6 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
int amdgpu_vce_entity_init(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
-int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence);
-int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct dma_fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 7a6beb2e7c4e..3199e4a5ff12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -569,13 +569,14 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
}
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence)
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -583,14 +584,14 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x0000000b;
ib->ptr[ib->length_dw++] = 0x00000014;
@@ -621,13 +622,14 @@ err:
}
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence)
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -635,14 +637,14 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001;
ib->ptr[ib->length_dw++] = handle;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x0000000b;
ib->ptr[ib->length_dw++] = 0x00000014;
@@ -675,13 +677,20 @@ err:
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = NULL;
long r;
- r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
+ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
- r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
+ r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
if (r)
goto error;
@@ -693,6 +702,8 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
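
In both the VCE and VCN encoders the message buffer address is split into two IB dwords: the high half explicitly via upper_32_bits(), the low half by implicit truncation on assignment. A sketch that makes the truncation explicit:

    /* Sketch only: emit a 64-bit GPU address as two 32-bit IB words. */
    static void ib_emit_gpu_addr(struct amdgpu_ib *ib, uint64_t addr)
    {
        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
        ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* diff relies on truncation */
    }
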
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5251352f5922..e775f271f1ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -130,7 +130,8 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
if (level == adev->vm_manager.root_level)
/* For the root directory */
- return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift;
+ return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
+ >> shift;
else if (level != AMDGPU_VM_PTB)
/* Everything in between */
return 512;
@@ -341,7 +342,7 @@ static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
}
-/**
+/*
* amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
*/
struct amdgpu_vm_pt_cursor {
@@ -482,6 +483,7 @@ static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
*
* @adev: amdgpu_device structure
* @vm: amdgpu_vm structure
+ * @start: optional cursor to start with
* @cursor: state to initialize
*
* Starts a depth-first traversal of the PD/PT tree.
@@ -535,7 +537,7 @@ static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
amdgpu_vm_pt_ancestor(cursor);
}
-/**
+/*
* for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
*/
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \
@@ -566,6 +568,14 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
list_add(&entry->tv.head, validated);
}
+/**
+ * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
+ *
+ * @bo: BO which was removed from the LRU
+ *
+ * Make sure the bulk_moveable flag is updated when a BO is removed from the
+ * LRU.
+ */
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_bo *abo;
@@ -600,19 +610,18 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
- struct ttm_bo_global *glob = adev->mman.bdev.glob;
struct amdgpu_vm_bo_base *bo_base;
if (vm->bulk_moveable) {
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return;
}
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(bo_base, &vm->idle, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
@@ -624,7 +633,7 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
&vm->lru_bulk_move);
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
vm->bulk_moveable = true;
}
@@ -693,6 +702,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @vm: VM to clear BO from
* @bo: BO to clear
+ * @direct: use a direct update
*
* Root PD needs to be reserved when calling this.
*
@@ -701,7 +711,8 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
*/
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_bo *bo)
+ struct amdgpu_bo *bo,
+ bool direct)
{
struct ttm_operation_ctx ctx = { true, false };
unsigned level = adev->vm_manager.root_level;
@@ -760,6 +771,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
+ params.direct = direct;
r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
if (r)
@@ -813,10 +825,13 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @vm: requesting vm
+ * @level: the page table level
+ * @direct: use a direct update
* @bp: resulting BO allocation parameters
*/
static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int level, struct amdgpu_bo_param *bp)
+ int level, bool direct,
+ struct amdgpu_bo_param *bp)
{
memset(bp, 0, sizeof(*bp));
@@ -831,6 +846,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
else if (!vm->root.base.bo || vm->root.base.bo->shadow)
bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
bp->type = ttm_bo_type_kernel;
+ bp->no_wait_gpu = direct;
if (vm->root.base.bo)
bp->resv = vm->root.base.bo->tbo.base.resv;
}
@@ -841,6 +857,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* @adev: amdgpu_device pointer
* @vm: VM to allocate page tables for
* @cursor: Which page table to allocate
+ * @direct: use a direct update
*
* Make sure a specific page table or directory is allocated.
*
@@ -850,7 +867,8 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*/
static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_vm_pt_cursor *cursor)
+ struct amdgpu_vm_pt_cursor *cursor,
+ bool direct)
{
struct amdgpu_vm_pt *entry = cursor->entry;
struct amdgpu_bo_param bp;
@@ -871,7 +889,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
if (entry->base.bo)
return 0;
- amdgpu_vm_bo_param(adev, vm, cursor->level, &bp);
+ amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp);
r = amdgpu_bo_create(adev, &bp, &pt);
if (r)
@@ -883,7 +901,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
amdgpu_vm_bo_base_init(&entry->base, vm, pt);
- r = amdgpu_vm_clear_bo(adev, vm, pt);
+ r = amdgpu_vm_clear_bo(adev, vm, pt, direct);
if (r)
goto error_free_pt;
@@ -1020,7 +1038,8 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
* Returns:
* 0 on success, errno otherwise.
*/
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
+ bool need_pipe_sync)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
@@ -1034,10 +1053,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
id->oa_base != job->oa_base ||
id->oa_size != job->oa_size);
bool vm_flush_needed = job->vm_needs_flush;
- bool pasid_mapping_needed = id->pasid != job->pasid ||
- !id->pasid_mapping ||
- !dma_fence_is_signaled(id->pasid_mapping);
struct dma_fence *fence = NULL;
+ bool pasid_mapping_needed = false;
unsigned patch_offset = 0;
int r;
@@ -1047,6 +1064,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
pasid_mapping_needed = true;
}
+ mutex_lock(&id_mgr->lock);
+ if (id->pasid != job->pasid || !id->pasid_mapping ||
+ !dma_fence_is_signaled(id->pasid_mapping))
+ pasid_mapping_needed = true;
+ mutex_unlock(&id_mgr->lock);
+
gds_switch_needed &= !!ring->funcs->emit_gds_switch;
vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
@@ -1086,9 +1109,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
}
if (pasid_mapping_needed) {
+ mutex_lock(&id_mgr->lock);
id->pasid = job->pasid;
dma_fence_put(id->pasid_mapping);
id->pasid_mapping = dma_fence_get(fence);
+ mutex_unlock(&id_mgr->lock);
}
dma_fence_put(fence);
@@ -1172,10 +1197,10 @@ uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
return result;
}
-/*
+/**
* amdgpu_vm_update_pde - update a single level in the hierarchy
*
- * @param: parameters for the update
+ * @params: parameters for the update
* @vm: requested vm
* @entry: entry to update
*
@@ -1199,7 +1224,7 @@ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
}
-/*
+/**
* amdgpu_vm_invalidate_pds - mark all PDs as invalid
*
* @adev: amdgpu_device pointer
@@ -1218,19 +1243,20 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
amdgpu_vm_bo_relocated(&entry->base);
}
-/*
- * amdgpu_vm_update_directories - make sure that all directories are valid
+/**
+ * amdgpu_vm_update_pdes - make sure that all directories are valid
*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @direct: submit directly to the paging queue
*
* Makes sure all directories are up to date.
*
* Returns:
* 0 for success, error for failure.
*/
-int amdgpu_vm_update_directories(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
+int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, bool direct)
{
struct amdgpu_vm_update_params params;
int r;
@@ -1241,6 +1267,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
+ params.direct = direct;
r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
if (r)
@@ -1268,7 +1295,7 @@ error:
return r;
}
-/**
+/*
* amdgpu_vm_update_flags - figure out flags for PTE updates
*
* Make sure to set the right flags for the PTEs at the desired level.
@@ -1391,7 +1418,8 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t incr, entry_end, pe_start;
struct amdgpu_bo *pt;
- r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor);
+ r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor,
+ params->direct);
if (r)
return r;
@@ -1482,13 +1510,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
*
* @adev: amdgpu_device pointer
- * @exclusive: fence we need to sync to
- * @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
+ * @direct: direct submission in a page fault
+ * @exclusive: fence we need to sync to
* @start: start of mapped range
* @last: last mapped entry
* @flags: flags for the entries
* @addr: addr to set the area to
+ * @pages_addr: DMA addresses to use for mapping
* @fence: optional resulting fence
*
* Fill in the page table entries between @start and @last.
@@ -1497,11 +1526,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, bool direct,
struct dma_fence *exclusive,
- dma_addr_t *pages_addr,
- struct amdgpu_vm *vm,
uint64_t start, uint64_t last,
uint64_t flags, uint64_t addr,
+ dma_addr_t *pages_addr,
struct dma_fence **fence)
{
struct amdgpu_vm_update_params params;
@@ -1511,6 +1540,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
+ params.direct = direct;
params.pages_addr = pages_addr;
/* sync to everything except eviction fences on unmapping */
@@ -1569,27 +1599,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
flags &= ~AMDGPU_PTE_WRITEABLE;
- flags &= ~AMDGPU_PTE_EXECUTABLE;
- flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
-
- if (adev->asic_type >= CHIP_NAVI10) {
- flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
- flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
- } else {
- flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
- flags |= (mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK);
- }
-
- if ((mapping->flags & AMDGPU_PTE_PRT) &&
- (adev->asic_type >= CHIP_VEGA10)) {
- flags |= AMDGPU_PTE_PRT;
- if (adev->asic_type >= CHIP_NAVI10) {
- flags |= AMDGPU_PTE_SNOOPED;
- flags |= AMDGPU_PTE_LOG;
- flags |= AMDGPU_PTE_SYSTEM;
- }
- flags &= ~AMDGPU_PTE_VALID;
- }
+ /* Apply ASIC specific mapping flags */
+ amdgpu_gmc_get_vm_pte(adev, mapping, &flags);
trace_amdgpu_vm_bo_update(mapping);
@@ -1633,7 +1644,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
dma_addr = pages_addr;
} else {
addr = pages_addr[pfn];
- max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ max_entries = count *
+ AMDGPU_GPU_PAGES_IN_CPU_PAGE;
}
} else if (flags & AMDGPU_PTE_VALID) {
@@ -1642,9 +1654,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
last = min((uint64_t)mapping->last, start + max_entries - 1);
- r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive,
start, last, flags, addr,
- fence);
+ dma_addr, fence);
if (r)
return r;
@@ -1672,8 +1684,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
* Returns:
* 0 for success, -EINVAL for failure.
*/
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
+int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
bool clear)
{
struct amdgpu_bo *bo = bo_va->base.bo;
@@ -1700,7 +1711,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
- exclusive = dma_resv_get_excl(bo->tbo.base.resv);
+ exclusive = bo->tbo.moving;
}
if (bo) {
@@ -1731,12 +1742,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
return r;
}
- if (vm->use_cpu_for_update) {
- /* Flush HDP */
- mb();
- amdgpu_asic_flush_hdp(adev, NULL);
- }
-
/* If the BO is not in its preferred location add it back to
* the evicted list so that it gets validated again on the
* next command submission.
@@ -1744,7 +1749,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
uint32_t mem_type = bo->tbo.mem.mem_type;
- if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
+ if (!(bo->preferred_domains &
+ amdgpu_mem_type_to_domain(mem_type)))
amdgpu_vm_bo_evicted(&bo_va->base);
else
amdgpu_vm_bo_idle(&bo_va->base);
@@ -1938,9 +1944,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
mapping->start < AMDGPU_GMC_HOLE_START)
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
- r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL,
mapping->start, mapping->last,
- init_pte_value, 0, &f);
+ init_pte_value, 0, NULL, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
if (r) {
dma_fence_put(f);
@@ -2682,12 +2688,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
spin_lock_init(&vm->invalidated_lock);
INIT_LIST_HEAD(&vm->freed);
- /* create scheduler entity for page table updates */
- r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
+ /* create scheduler entities for page table updates */
+ r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs,
adev->vm_manager.vm_pte_num_rqs, NULL);
if (r)
return r;
+ r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs,
+ adev->vm_manager.vm_pte_num_rqs, NULL);
+ if (r)
+ goto error_free_direct;
+
vm->pte_support_ats = false;
if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
@@ -2702,7 +2713,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
- WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
+ WARN_ONCE((vm->use_cpu_for_update &&
+ !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
if (vm->use_cpu_for_update)
@@ -2711,12 +2723,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->update_funcs = &amdgpu_vm_sdma_funcs;
vm->last_update = NULL;
- amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
+ amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
r = amdgpu_bo_create(adev, &bp, &root);
if (r)
- goto error_free_sched_entity;
+ goto error_free_delayed;
r = amdgpu_bo_reserve(root, true);
if (r)
@@ -2728,7 +2740,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
- r = amdgpu_vm_clear_bo(adev, vm, root);
+ r = amdgpu_vm_clear_bo(adev, vm, root, false);
if (r)
goto error_unreserve;
@@ -2759,8 +2771,11 @@ error_free_root:
amdgpu_bo_unref(&vm->root.base.bo);
vm->root.base.bo = NULL;
-error_free_sched_entity:
- drm_sched_entity_destroy(&vm->entity);
+error_free_delayed:
+ drm_sched_entity_destroy(&vm->delayed);
+
+error_free_direct:
+ drm_sched_entity_destroy(&vm->direct);
return r;
}
@@ -2801,6 +2816,7 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @pasid: pasid to use
*
* This only works on GFX VMs that don't have any BOs added and no
* page tables allocated yet.
@@ -2816,7 +2832,8 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
* Returns:
* 0 for success, -errno for errors.
*/
-int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid)
+int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned int pasid)
{
bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
int r;
@@ -2848,7 +2865,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
*/
if (pte_support_ats != vm->pte_support_ats) {
vm->pte_support_ats = pte_support_ats;
- r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo);
+ r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false);
if (r)
goto free_idr;
}
@@ -2858,7 +2875,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
- WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
+ WARN_ONCE((vm->use_cpu_for_update &&
+ !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
if (vm->use_cpu_for_update)
@@ -2937,19 +2955,38 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
struct amdgpu_bo_va_mapping *mapping, *tmp;
bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
struct amdgpu_bo *root;
- int i, r;
+ int i;
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
+ root = amdgpu_bo_ref(vm->root.base.bo);
+ amdgpu_bo_reserve(root, true);
if (vm->pasid) {
unsigned long flags;
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+ vm->pasid = 0;
+ }
+
+ list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
+ if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
+ amdgpu_vm_prt_fini(adev, vm);
+ prt_fini_needed = false;
+ }
+
+ list_del(&mapping->list);
+ amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
}
- drm_sched_entity_destroy(&vm->entity);
+ amdgpu_vm_free_pts(adev, vm, NULL);
+ amdgpu_bo_unreserve(root);
+ amdgpu_bo_unref(&root);
+ WARN_ON(vm->root.base.bo);
+
+ drm_sched_entity_destroy(&vm->direct);
+ drm_sched_entity_destroy(&vm->delayed);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
dev_err(adev->dev, "still active bo inside vm\n");
@@ -2962,26 +2999,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
list_del(&mapping->list);
kfree(mapping);
}
- list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
- if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
- amdgpu_vm_prt_fini(adev, vm);
- prt_fini_needed = false;
- }
-
- list_del(&mapping->list);
- amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
- }
- root = amdgpu_bo_ref(vm->root.base.bo);
- r = amdgpu_bo_reserve(root, true);
- if (r) {
- dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
- } else {
- amdgpu_vm_free_pts(adev, vm, NULL);
- amdgpu_bo_unreserve(root);
- }
- amdgpu_bo_unref(&root);
- WARN_ON(vm->root.base.bo);
dma_fence_put(vm->last_update);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
amdgpu_vmid_free_reserved(adev, vm, i);
@@ -3065,8 +3083,9 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
switch (args->in.op) {
case AMDGPU_VM_OP_RESERVE_VMID:
- /* current, we only have requirement to reserve vmid from gfxhub */
- r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
+ /* Currently we only need to reserve a vmid from the gfxhub */
+ r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
+ AMDGPU_GFXHUB_0);
if (r)
return r;
break;
@@ -3109,13 +3128,88 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
*/
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
{
- if (!vm->task_info.pid) {
- vm->task_info.pid = current->pid;
- get_task_comm(vm->task_info.task_name, current);
+ if (vm->task_info.pid)
+ return;
- if (current->group_leader->mm == current->mm) {
- vm->task_info.tgid = current->group_leader->pid;
- get_task_comm(vm->task_info.process_name, current->group_leader);
- }
+ vm->task_info.pid = current->pid;
+ get_task_comm(vm->task_info.task_name, current);
+
+ if (current->group_leader->mm != current->mm)
+ return;
+
+ vm->task_info.tgid = current->group_leader->pid;
+ get_task_comm(vm->task_info.process_name, current->group_leader);
+}
+
+/**
+ * amdgpu_vm_handle_fault - graceful handling of VM faults.
+ * @adev: amdgpu device pointer
+ * @pasid: PASID of the VM
+ * @addr: Address of the fault
+ *
+ * Try to gracefully handle a VM fault. Returns true if the fault was
+ * handled and should not be reported again.
+ */
+bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
+ uint64_t addr)
+{
+ struct amdgpu_bo *root;
+ uint64_t value, flags;
+ struct amdgpu_vm *vm;
+ long r;
+
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ if (vm)
+ root = amdgpu_bo_ref(vm->root.base.bo);
+ else
+ root = NULL;
+ spin_unlock(&adev->vm_manager.pasid_lock);
+
+ if (!root)
+ return false;
+
+ r = amdgpu_bo_reserve(root, true);
+ if (r)
+ goto error_unref;
+
+ /* Double check that the VM still exists */
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ if (vm && vm->root.base.bo != root)
+ vm = NULL;
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ if (!vm)
+ goto error_unlock;
+
+ addr /= AMDGPU_GPU_PAGE_SIZE;
+ flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
+ AMDGPU_PTE_SYSTEM;
+
+ if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
+ /* Redirect the access to the dummy page */
+ value = adev->dummy_page_addr;
+ flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
+ AMDGPU_PTE_WRITEABLE;
+ } else {
+ /* Let the hw retry silently on the PTE */
+ value = 0;
}
+
+ r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1,
+ flags, value, NULL, NULL);
+ if (r)
+ goto error_unlock;
+
+ r = amdgpu_vm_update_pdes(adev, vm, true);
+
+error_unlock:
+ amdgpu_bo_unreserve(root);
+ if (r < 0)
+ DRM_ERROR("Can't handle page fault (%ld)\n", r);
+
+error_unref:
+ amdgpu_bo_unref(&root);
+
+ return false;
}
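
amdgpu_vm_handle_fault() introduces a lookup/reserve/re-check dance around the PASID IDR, because the VM can be torn down between dropping pasid_lock and reserving the root PD. The pattern in isolation, as a sketch:

    /* Sketch only: resolve a PASID to a live VM with its root PD reserved.
     * On success the caller must amdgpu_bo_unreserve() and unref *root. */
    static struct amdgpu_vm *vm_lookup_sketch(struct amdgpu_device *adev,
                                              unsigned int pasid,
                                              struct amdgpu_bo **root)
    {
        struct amdgpu_vm *vm;

        spin_lock(&adev->vm_manager.pasid_lock);
        vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
        *root = vm ? amdgpu_bo_ref(vm->root.base.bo) : NULL;
        spin_unlock(&adev->vm_manager.pasid_lock);

        if (!*root)
            return NULL;

        if (amdgpu_bo_reserve(*root, true)) {
            amdgpu_bo_unref(root);
            return NULL;
        }

        /* The VM may have died while we slept in reserve: check again */
        spin_lock(&adev->vm_manager.pasid_lock);
        vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
        if (vm && vm->root.base.bo != *root)
            vm = NULL;
        spin_unlock(&adev->vm_manager.pasid_lock);

        if (!vm) {
            amdgpu_bo_unreserve(*root);
            amdgpu_bo_unref(root);
        }
        return vm;
    }
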
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 2eda3a8c330d..4dbbe1b6b413 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -99,6 +99,9 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
+/* Reserve 4MB VRAM for page tables */
+#define AMDGPU_VM_RESERVED_VRAM (4ULL << 20)
+
/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS 3
#define AMDGPU_GFXHUB_0 0
@@ -199,6 +202,11 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm *vm;
/**
+ * @direct: if changes should be made directly
+ */
+ bool direct;
+
+ /**
* @pages_addr:
*
* DMA addresses to use for mapping
@@ -254,8 +262,9 @@ struct amdgpu_vm {
struct amdgpu_vm_pt root;
struct dma_fence *last_update;
- /* Scheduler entity for page table updates */
- struct drm_sched_entity entity;
+ /* Scheduler entities for page table updates */
+ struct drm_sched_entity direct;
+ struct drm_sched_entity delayed;
unsigned int pasid;
/* dedicated to vm */
@@ -357,8 +366,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*callback)(void *p, struct amdgpu_bo *bo),
void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
-int amdgpu_vm_update_directories(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
+int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, bool direct);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
@@ -404,6 +413,8 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
struct amdgpu_task_info *task_info);
+bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
+ uint64_t addr);
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 5222d165abfc..73fec7a0ced5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -49,13 +49,6 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
{
int r;
- /* Wait for PT BOs to be idle. PTs share the same resv. object
- * as the root PD BO
- */
- r = amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
- if (unlikely(r))
- return r;
-
/* Wait for any BO move to be completed */
if (exclusive) {
r = dma_fence_wait(exclusive, true);
@@ -63,7 +56,14 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
return r;
}
- return 0;
+ /* Don't wait for submissions during page fault */
+ if (p->direct)
+ return 0;
+
+ /* Wait for PT BOs to be idle. PTs share the same resv. object
+ * as the root PD BO
+ */
+ return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
}
/**
@@ -89,7 +89,7 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
pe += (unsigned long)amdgpu_bo_kptr(bo);
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
for (i = 0; i < count; i++) {
value = p->pages_addr ?
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 61fc584cbb1a..832db59f441e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -68,17 +68,19 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
if (r)
return r;
+ p->num_dw_left = ndw;
+
+ /* Wait for moves to be completed */
r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false);
if (r)
return r;
- r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
- owner, false);
- if (r)
- return r;
+ /* Don't wait for any submissions during page fault handling */
+ if (p->direct)
+ return 0;
- p->num_dw_left = ndw;
- return 0;
+ return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
+ owner, false);
}
/**
@@ -95,22 +97,23 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
{
struct amdgpu_bo *root = p->vm->root.base.bo;
struct amdgpu_ib *ib = p->job->ibs;
+ struct drm_sched_entity *entity;
struct amdgpu_ring *ring;
struct dma_fence *f;
int r;
- ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);
+ entity = p->direct ? &p->vm->direct : &p->vm->delayed;
+ ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
WARN_ON(ib->length_dw == 0);
amdgpu_ring_pad_ib(ring, ib);
WARN_ON(ib->length_dw > p->num_dw_left);
- r = amdgpu_job_submit(p->job, &p->vm->entity,
- AMDGPU_FENCE_OWNER_VM, &f);
+ r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);
if (r)
goto error;
amdgpu_bo_fence(root, f, true);
- if (fence)
+ if (fence && !p->direct)
swap(*fence, f);
dma_fence_put(f);
return 0;
@@ -120,7 +123,6 @@ error:
return r;
}
-
/**
* amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
*
@@ -141,7 +143,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
src += p->num_dw_left * 4;
pe += amdgpu_bo_gpu_offset(bo);
- trace_amdgpu_vm_copy_ptes(pe, src, count);
+ trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);
amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
}
@@ -168,7 +170,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
struct amdgpu_ib *ib = p->job->ibs;
pe += amdgpu_bo_gpu_offset(bo);
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
if (count < 3) {
amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
count, incr);
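
The SDMA backend now routes work through one of two scheduler entities; direct (page-fault-time) submissions bypass the delayed queue and, per the `fence && !p->direct` check above, never hand a fence back to the caller. The selection in isolation:

    /* Sketch only: pick the scheduler entity for a page-table update. */
    static struct drm_sched_entity *
    vm_update_entity_sketch(struct amdgpu_vm_update_params *p)
    {
        /* Fault-time updates must not queue behind normal PT work */
        return p->direct ? &p->vm->direct : &p->vm->delayed;
    }
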
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 3a9d8c15fe9f..82a3299e53c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -23,6 +23,9 @@
*/
#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_atomfirmware.h"
+#include "atom.h"
struct amdgpu_vram_mgr {
struct drm_mm mm;
@@ -101,6 +104,39 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
}
+static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ switch (adev->gmc.vram_vendor) {
+ case SAMSUNG:
+ return snprintf(buf, PAGE_SIZE, "samsung\n");
+ case INFINEON:
+ return snprintf(buf, PAGE_SIZE, "infineon\n");
+ case ELPIDA:
+ return snprintf(buf, PAGE_SIZE, "elpida\n");
+ case ETRON:
+ return snprintf(buf, PAGE_SIZE, "etron\n");
+ case NANYA:
+ return snprintf(buf, PAGE_SIZE, "nanya\n");
+ case HYNIX:
+ return snprintf(buf, PAGE_SIZE, "hynix\n");
+ case MOSEL:
+ return snprintf(buf, PAGE_SIZE, "mosel\n");
+ case WINBOND:
+ return snprintf(buf, PAGE_SIZE, "winbond\n");
+ case ESMT:
+ return snprintf(buf, PAGE_SIZE, "esmt\n");
+ case MICRON:
+ return snprintf(buf, PAGE_SIZE, "micron\n");
+ default:
+ return snprintf(buf, PAGE_SIZE, "unknown\n");
+ }
+}
+
static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
amdgpu_mem_info_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
@@ -109,6 +145,8 @@ static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
amdgpu_mem_info_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
amdgpu_mem_info_vis_vram_used_show, NULL);
+static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
+ amdgpu_mem_info_vram_vendor, NULL);
/**
* amdgpu_vram_mgr_init - init VRAM manager and DRM MM
@@ -154,6 +192,11 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n");
return ret;
}
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_vendor);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_vram_vendor\n");
+ return ret;
+ }
return 0;
}
@@ -180,6 +223,7 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
device_remove_file(adev->dev, &dev_attr_mem_info_vram_used);
device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
+ device_remove_file(adev->dev, &dev_attr_mem_info_vram_vendor);
return 0;
}
@@ -275,7 +319,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
struct drm_mm_node *nodes;
enum drm_mm_insert_mode mode;
unsigned long lpfn, num_nodes, pages_per_node, pages_left;
- uint64_t vis_usage = 0, mem_bytes;
+ uint64_t vis_usage = 0, mem_bytes, max_bytes;
unsigned i;
int r;
@@ -283,9 +327,13 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (!lpfn)
lpfn = man->size;
+ max_bytes = adev->gmc.mc_vram_size;
+ if (tbo->type != ttm_bo_type_kernel)
+ max_bytes -= AMDGPU_VM_RESERVED_VRAM;
+
/* bail out quickly if there's likely not enough VRAM for this BO */
mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
- if (atomic64_add_return(mem_bytes, &mgr->usage) > adev->gmc.mc_vram_size) {
+ if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
atomic64_sub(mem_bytes, &mgr->usage);
mem->mm_node = NULL;
return 0;
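
The allocation path above charges usage optimistically and backs the charge out on overflow, which keeps the limit check race-free without taking a lock. The same idiom in isolation, assuming usage is the atomic64_t the diff operates on:

    /* Sketch only: optimistic add-then-check accounting. */
    static bool vram_try_charge_sketch(struct amdgpu_vram_mgr *mgr,
                                       u64 bytes, u64 max_bytes)
    {
        if (atomic64_add_return(bytes, &mgr->usage) > max_bytes) {
            atomic64_sub(bytes, &mgr->usage);
            return false;   /* would exceed the (reduced) limit */
        }
        return true;
    }
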
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 65aae75f80fd..00371713c671 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_smu.h"
+#include "amdgpu_ras.h"
#include "df/df_3_6_offset.h"
static DEFINE_MUTEX(xgmi_mutex);
@@ -437,3 +438,52 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
mutex_unlock(&hive->hive_lock);
}
}
+
+int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "xgmi_wafl_err_count",
+ .debugfs_name = "xgmi_wafl_err_inject",
+ };
+
+ if (!adev->gmc.xgmi.supported ||
+ adev->gmc.xgmi.num_physical_nodes == 0)
+ return 0;
+
+ if (!adev->gmc.xgmi.ras_if) {
+ adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->gmc.xgmi.ras_if)
+ return -ENOMEM;
+ adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
+ adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->gmc.xgmi.ras_if->sub_block_index = 0;
+ strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl");
+ }
+ ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
+ r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
+ &fs_info, &ih_info);
+ if (r || !amdgpu_ras_is_supported(adev, adev->gmc.xgmi.ras_if->block)) {
+ kfree(adev->gmc.xgmi.ras_if);
+ adev->gmc.xgmi.ras_if = NULL;
+ }
+
+ return r;
+}
+
+void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
+ adev->gmc.xgmi.ras_if) {
+ struct ras_common_if *ras_if = adev->gmc.xgmi.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
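
amdgpu_xgmi_ras_late_init() follows the same ras_if ownership rule as the UMC helper: the block allocates and describes its ras_common_if, then frees it again if registration fails or RAS is unsupported for that block. Factored out as a sketch:

    /* Sketch only: shared ras_if register-or-release lifecycle. */
    static int ras_if_register_sketch(struct amdgpu_device *adev,
                                      struct ras_common_if **ras_if,
                                      struct ras_fs_if *fs_info,
                                      struct ras_ih_if *ih_info)
    {
        int r;

        if (!*ras_if) {
            *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
            if (!*ras_if)
                return -ENOMEM;
            /* ... fill in block/type/sub_block_index/name here ... */
        }

        r = amdgpu_ras_late_init(adev, *ras_if, fs_info, ih_info);
        if (r || !amdgpu_ras_is_supported(adev, (*ras_if)->block)) {
            kfree(*ras_if);
            *ras_if = NULL;
        }
        return r;
    }
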
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index fbcee31788c4..bbf504ff7051 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -42,6 +42,8 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
+int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev);
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
struct amdgpu_device *bo_adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c
index 4853899b1824..fda99c958c3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c
@@ -24,7 +24,6 @@
#include "soc15.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "arct_ip_offset.h"
int arct_reg_base_init(struct amdgpu_device *adev)
@@ -52,6 +51,8 @@ int arct_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[SDMA7_HWIP][i] = (uint32_t *)(&(SDMA7_BASE.instance[i]));
adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[UMC_HWIP][i] = (uint32_t *)(&(UMC_BASE.instance[i]));
+ adev->reg_offset[RSMU_HWIP][i] = (uint32_t *)(&(RSMU_BASE.instance[i]));
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index b81bb414fcb3..fc8b34480f66 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1270,15 +1270,15 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
}
/**
- * cik_asic_reset - soft reset GPU
+ * cik_asic_pci_config_reset - soft reset GPU
*
* @adev: amdgpu_device pointer
*
- * Look up which blocks are hung and attempt
- * to reset them.
+ * Use PCI Config method to reset the GPU.
+ *
* Returns 0 for success.
*/
-static int cik_asic_reset(struct amdgpu_device *adev)
+static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
{
int r;
@@ -1294,7 +1294,45 @@ static int cik_asic_reset(struct amdgpu_device *adev)
static enum amd_reset_method
cik_asic_reset_method(struct amdgpu_device *adev)
{
- return AMD_RESET_METHOD_LEGACY;
+ bool baco_reset;
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ /* disable baco reset until it works */
+ /* smu7_asic_get_baco_capability(adev, &baco_reset); */
+ baco_reset = false;
+ break;
+ default:
+ baco_reset = false;
+ break;
+ }
+
+ if (baco_reset)
+ return AMD_RESET_METHOD_BACO;
+ else
+ return AMD_RESET_METHOD_LEGACY;
+}
+
+/**
+ * cik_asic_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up which blocks are hung and attempt
+ * to reset them.
+ * Returns 0 for success.
+ */
+static int cik_asic_reset(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
+ r = smu7_asic_baco_reset(adev);
+ else
+ r = cik_asic_pci_config_reset(adev);
+
+ return r;
}
static u32 cik_get_config_memsize(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index 54c625a2e570..9870bf27870e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -31,4 +31,7 @@ void cik_srbm_select(struct amdgpu_device *adev,
int cik_set_ip_blocks(struct amdgpu_device *adev);
void legacy_doorbell_index_init(struct amdgpu_device *adev);
+int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap);
+int smu7_asic_baco_reset(struct amdgpu_device *adev);
+
#endif
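
cik.c now separates method selection from execution, so callers can ask which reset method would be used without triggering a reset. The resulting dispatch, as a sketch (both callees are static within cik.c):

    /* Sketch only: two-step reset dispatch. */
    static int cik_reset_dispatch_sketch(struct amdgpu_device *adev)
    {
        if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
            return smu7_asic_baco_reset(adev);

        return cik_asic_pci_config_reset(adev);
    }
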
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 645550e7caf5..40d2ac723dd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -330,9 +330,11 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -368,6 +370,7 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
amdgpu_irq_get(adev, &adev->hpd_irq,
amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -382,9 +385,11 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -397,6 +402,7 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq,
amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1219,10 +1225,12 @@ static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
int interlace = 0;
@@ -1230,12 +1238,14 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1261,10 +1271,12 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
u8 *sadb = NULL;
@@ -1273,12 +1285,14 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1313,10 +1327,12 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder
static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1339,12 +1355,14 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1352,10 +1370,10 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
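
Every DCE variant below repeats the same conversion: the bare connector_list walk becomes a drm_connector_list_iter, which takes the references needed so connectors hot-added or removed during the walk (e.g. by DP MST) cannot break the iteration. The recurring find-connector-for-encoder loop, factored as a sketch:

    /* Sketch only: the iterator pattern used across dce_v6/v10/v11. */
    static struct amdgpu_connector *
    find_encoder_connector_sketch(struct drm_device *dev,
                                  struct drm_encoder *encoder)
    {
        struct drm_connector_list_iter iter;
        struct drm_connector *connector;
        struct amdgpu_connector *found = NULL;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
            if (connector->encoder == encoder) {
                found = to_amdgpu_connector(connector);
                break;
            }
        }
        drm_connector_list_iter_end(&iter);  /* must always pair with _begin */

        return found;
    }
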
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index d9f470632b2c..898ef72d423c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -348,9 +348,11 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -385,6 +387,7 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -399,9 +402,11 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -413,6 +418,7 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1245,10 +1251,12 @@ static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
int interlace = 0;
@@ -1256,12 +1264,14 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1287,10 +1297,12 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
u8 *sadb = NULL;
@@ -1299,12 +1311,14 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1339,10 +1353,12 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder
static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1365,12 +1381,14 @@ static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1378,10 +1396,10 @@ static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 3eb2e7429269..db15a112becc 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -281,9 +281,11 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -309,7 +311,7 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
-
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -324,9 +326,11 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -338,6 +342,7 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1124,20 +1129,24 @@ static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
int interlace = 0;
u32 tmp;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1164,21 +1173,25 @@ static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u8 *sadb = NULL;
int sad_count;
u32 tmp;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1221,10 +1234,12 @@ static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1244,12 +1259,14 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1257,10 +1274,10 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 tmp = 0;
@@ -1632,6 +1649,7 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
int bpc = 8;
@@ -1639,12 +1657,14 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
if (!dig || !dig->afmt)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index a16c5e9e610e..f06c9022c1fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -275,9 +275,11 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -303,6 +305,7 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -317,9 +320,11 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -331,6 +336,7 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1157,10 +1163,12 @@ static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp = 0, offset;
@@ -1169,12 +1177,14 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1214,10 +1224,12 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 offset, tmp;
u8 *sadb = NULL;
@@ -1228,12 +1240,14 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1263,11 +1277,13 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 offset;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1292,12 +1308,14 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1305,10 +1323,10 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
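
Each dce_vN_0_audio_write_sad_regs() hunk also splits the drm_edid_to_sad() check the same way: a negative count is a real parse failure worth a DRM_ERROR(), while zero only means the EDID carries no Short Audio Descriptors and the function should bail out quietly. Schematically:

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count < 0)		/* parse failure: log it */
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
	if (sad_count <= 0)		/* failure or no SADs: nothing to program */
		return;
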
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
index 844c03868248..d6221298b477 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
@@ -33,6 +33,10 @@ static void df_v1_7_sw_init(struct amdgpu_device *adev)
{
}
+static void df_v1_7_sw_fini(struct amdgpu_device *adev)
+{
+}
+
static void df_v1_7_enable_broadcast_mode(struct amdgpu_device *adev,
bool enable)
{
@@ -111,6 +115,7 @@ static void df_v1_7_enable_ecc_force_par_wr_rmw(struct amdgpu_device *adev,
const struct amdgpu_df_funcs df_v1_7_funcs = {
.sw_init = df_v1_7_sw_init,
+ .sw_fini = df_v1_7_sw_fini,
.enable_broadcast_mode = df_v1_7_enable_broadcast_mode,
.get_fb_channel_number = df_v1_7_get_fb_channel_number,
.get_hbm_channel_number = df_v1_7_get_hbm_channel_number,
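
The data-fabric blocks grow a sw_fini hook next to sw_init so each DF version can undo what its init created: df_v1_7 only needs the empty stub above, while df_v3_6 (next) uses it to remove the df_cntr_avail sysfs file its init path creates. A sketch of the extended callback table, with unrelated members elided and df_vX as a placeholder name rather than a real version:

	static void df_vX_sw_init(struct amdgpu_device *adev)
	{
		/* create per-device state, sysfs nodes, ... */
	}

	static void df_vX_sw_fini(struct amdgpu_device *adev)
	{
		/* tear down exactly what sw_init created */
	}

	const struct amdgpu_df_funcs df_vX_funcs = {
		.sw_init = df_vX_sw_init,
		.sw_fini = df_vX_sw_fini,	/* the new teardown hook */
		/* remaining callbacks unchanged */
	};
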
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 5850c8e34caa..16fbd2bc8ad1 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -99,8 +99,8 @@ static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
unsigned long flags, address, data;
uint32_t ficadl_val, ficadh_val;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
@@ -122,8 +122,8 @@ static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
@@ -150,8 +150,8 @@ static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, lo_addr);
@@ -172,8 +172,8 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, lo_addr);
@@ -220,6 +220,13 @@ static void df_v3_6_sw_init(struct amdgpu_device *adev)
adev->df_perfmon_config_assign_mask[i] = 0;
}
+static void df_v3_6_sw_fini(struct amdgpu_device *adev)
+{
+
+ device_remove_file(adev->dev, &dev_attr_df_cntr_avail);
+
+}
+
static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
bool enable)
{
@@ -537,6 +544,7 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
const struct amdgpu_df_funcs df_v3_6_funcs = {
.sw_init = df_v3_6_sw_init,
+ .sw_fini = df_v3_6_sw_fini,
.enable_broadcast_mode = df_v3_6_enable_broadcast_mode,
.get_fb_channel_number = df_v3_6_get_fb_channel_number,
.get_hbm_channel_number = df_v3_6_get_hbm_channel_number,
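
Interleaved with the df_v3_6 changes is a mechanical rename: the bare adev->nbio_funcs pointer becomes an embedded adev->nbio container that carries the callback table and the HDP-flush register description side by side. Roughly as follows; the real struct amdgpu_nbio holds more than these two fields:

	struct amdgpu_nbio {
		const struct nbio_hdp_flush_reg *hdp_flush_reg;
		const struct amdgpu_nbio_funcs *funcs;
		/* ... */
	};

	/* old: adev->nbio_funcs->get_pcie_data_offset(adev)
	 * new: adev->nbio.funcs->get_pcie_data_offset(adev)
	 *
	 * old: adev->nbio_funcs->hdp_flush_reg
	 * new: adev->nbio.hdp_flush_reg
	 */
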
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 957811b73672..b2ad928f60ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -127,7 +127,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CGTT_CLK_CTRL, 0xfeff0fff, 0x40000100),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000)
};
static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
@@ -171,7 +171,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000),
};
static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
@@ -2443,7 +2443,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -2513,7 +2513,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
@@ -2582,7 +2582,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -2903,7 +2903,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -4357,7 +4357,7 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
@@ -4377,8 +4377,8 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
}
gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
- adev->nbio_funcs->get_hdp_flush_req_offset(adev),
- adev->nbio_funcs->get_hdp_flush_done_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
ref_and_mask, ref_and_mask, 0x20);
}
@@ -5283,15 +5283,12 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
{
- /* init asic gds info */
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- default:
- adev->gds.gds_size = 0x10000;
- adev->gds.gds_compute_max_wave_id = 0x4ff;
- break;
- }
+ unsigned total_cu = adev->gfx.config.max_cu_per_sh *
+ adev->gfx.config.max_sh_per_se *
+ adev->gfx.config.max_shader_engines;
+ adev->gds.gds_size = 0x10000;
+ adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
adev->gds.gws_size = 64;
adev->gds.oa_size = 16;
}
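
gfx_v10_0_set_gds_init() stops hardcoding the compute wave-id limit per ASIC and derives it from the CU count instead, at 32 waves per CU with zero-based ids. The arithmetic lines up with the constant it replaces: a configuration with max_shader_engines = 2, max_sh_per_se = 2 and max_cu_per_sh = 10 gives total_cu = 40, and

	total_cu * 32 - 1 = 40 * 32 - 1 = 1279 = 0x4ff,

which is exactly the value the removed switch statement hardcoded.
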
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index dcadc73bffd2..47e256b6a0e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -131,6 +131,18 @@ MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
#define mmTCP_CHAN_STEER_5_ARCT 0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0
+struct ras_gfx_subblock_reg {
+ const char *name;
+ uint32_t hwip;
+ uint32_t inst;
+ uint32_t seg;
+ uint32_t reg_offset;
+ uint32_t sec_count_mask;
+ uint32_t sec_count_shift;
+ uint32_t ded_count_mask;
+ uint32_t ded_count_shift;
+};
+
enum ta_ras_gfx_subblock {
/*CPC*/
TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
@@ -517,9 +529,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
@@ -582,9 +594,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
@@ -676,9 +688,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
@@ -1324,7 +1336,8 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
/* TODO: Determine if MEC2 JT FW loading can be removed
for all GFX V9 asic and above */
- if (adev->asic_type != CHIP_ARCTURUS) {
+ if (adev->asic_type != CHIP_ARCTURUS &&
+ adev->asic_type != CHIP_RENOIR) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
info->fw = adev->gfx.mec2_fw;
@@ -1956,190 +1969,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
return 0;
}
-static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
- struct amdgpu_ngg_buf *ngg_buf,
- int size_se,
- int default_size_se)
-{
- int r;
-
- if (size_se < 0) {
- dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
- return -EINVAL;
- }
- size_se = size_se ? size_se : default_size_se;
-
- ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
- r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &ngg_buf->bo,
- &ngg_buf->gpu_addr,
- NULL);
- if (r) {
- dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
- return r;
- }
- ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);
-
- return r;
-}
-
-static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < NGG_BUF_MAX; i++)
- amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
- &adev->gfx.ngg.buf[i].gpu_addr,
- NULL);
-
- memset(&adev->gfx.ngg.buf[0], 0,
- sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);
-
- adev->gfx.ngg.init = false;
-
- return 0;
-}
-
-static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
-{
- int r;
-
- if (!amdgpu_ngg || adev->gfx.ngg.init == true)
- return 0;
-
- /* GDS reserve memory: 64 bytes alignment */
- adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
- adev->gds.gds_size -= adev->gfx.ngg.gds_reserve_size;
- adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
- adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
-
- /* Primitive Buffer */
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
- amdgpu_prim_buf_per_se,
- 64 * 1024);
- if (r) {
- dev_err(adev->dev, "Failed to create Primitive Buffer\n");
- goto err;
- }
-
- /* Position Buffer */
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
- amdgpu_pos_buf_per_se,
- 256 * 1024);
- if (r) {
- dev_err(adev->dev, "Failed to create Position Buffer\n");
- goto err;
- }
-
- /* Control Sideband */
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
- amdgpu_cntl_sb_buf_per_se,
- 256);
- if (r) {
- dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
- goto err;
- }
-
- /* Parameter Cache, not created by default */
- if (amdgpu_param_buf_per_se <= 0)
- goto out;
-
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
- amdgpu_param_buf_per_se,
- 512 * 1024);
- if (r) {
- dev_err(adev->dev, "Failed to create Parameter Cache\n");
- goto err;
- }
-
-out:
- adev->gfx.ngg.init = true;
- return 0;
-err:
- gfx_v9_0_ngg_fini(adev);
- return r;
-}
-
-static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
-{
- struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
- int r;
- u32 data, base;
-
- if (!amdgpu_ngg)
- return 0;
-
- /* Program buffer size */
- data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
- adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
- adev->gfx.ngg.buf[NGG_POS].size >> 8);
- WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
-
- data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
- adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
- adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
- WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
-
- /* Program buffer base address */
- base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
- data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
- WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);
-
- base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
- data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
- WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);
-
- base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
- data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
- WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);
-
- base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
- data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
- WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);
-
- base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
- data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
- WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);
-
- base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
- data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
- WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);
-
- /* Clear GDS reserved memory */
- r = amdgpu_ring_alloc(ring, 17);
- if (r) {
- DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n",
- ring->name, r);
- return r;
- }
-
- gfx_v9_0_write_data_to_reg(ring, 0, false,
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
- (adev->gds.gds_size +
- adev->gfx.ngg.gds_reserve_size));
-
- amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
- amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
- PACKET3_DMA_DATA_DST_SEL(1) |
- PACKET3_DMA_DATA_SRC_SEL(2)));
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
- adev->gfx.ngg.gds_reserve_size);
-
- gfx_v9_0_write_data_to_reg(ring, 0, false,
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);
-
- amdgpu_ring_commit(ring);
-
- return 0;
-}
-
static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
int mec, int pipe, int queue)
{
@@ -2307,10 +2136,6 @@ static int gfx_v9_0_sw_init(void *handle)
if (r)
return r;
- r = gfx_v9_0_ngg_init(adev);
- if (r)
- return r;
-
return 0;
}
@@ -2320,19 +2145,7 @@ static int gfx_v9_0_sw_fini(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
- adev->gfx.ras_if) {
- struct ras_common_if *ras_if = adev->gfx.ras_if;
- struct ras_ih_if ih_info = {
- .head = *ras_if,
- };
-
- amdgpu_ras_debugfs_remove(adev, ras_if);
- amdgpu_ras_sysfs_remove(adev, ras_if);
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
- amdgpu_ras_feature_enable(adev, ras_if, 0);
- kfree(ras_if);
- }
+ amdgpu_gfx_ras_fini(adev);
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -2344,7 +2157,6 @@ static int gfx_v9_0_sw_fini(void *handle)
amdgpu_gfx_kiq_fini(adev);
gfx_v9_0_mec_fini(adev);
- gfx_v9_0_ngg_fini(adev);
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
@@ -3883,12 +3695,6 @@ static int gfx_v9_0_hw_init(void *handle)
if (r)
return r;
- if (adev->asic_type != CHIP_ARCTURUS) {
- r = gfx_v9_0_ngg_en(adev);
- if (r)
- return r;
- }
-
return r;
}
@@ -4184,6 +3990,7 @@ static const struct soc15_reg_entry sec_ded_counter_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
+ { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
@@ -4203,6 +4010,10 @@ static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
int i, r;
+ /* only support when RAS is enabled */
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return 0;
+
r = amdgpu_ring_alloc(ring, 7);
if (r) {
DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
@@ -4393,33 +4204,14 @@ static int gfx_v9_0_early_init(void *handle)
return 0;
}
-static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
- struct amdgpu_iv_entry *entry);
-
static int gfx_v9_0_ecc_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ras_common_if **ras_if = &adev->gfx.ras_if;
- struct ras_ih_if ih_info = {
- .cb = gfx_v9_0_process_ras_data_cb,
- };
- struct ras_fs_if fs_info = {
- .sysfs_name = "gfx_err_count",
- .debugfs_name = "gfx_err_inject",
- };
- struct ras_common_if ras_block = {
- .block = AMDGPU_RAS_BLOCK__GFX,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .sub_block_index = 0,
- .name = "gfx",
- };
int r;
- if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
- amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
- return 0;
- }
+ r = amdgpu_gfx_ras_late_init(adev);
+ if (r)
+ return r;
r = gfx_v9_0_do_edc_gds_workarounds(adev);
if (r)
@@ -4430,72 +4222,7 @@ static int gfx_v9_0_ecc_late_init(void *handle)
if (r)
return r;
- /* handle resume path. */
- if (*ras_if) {
- /* resend ras TA enable cmd during resume.
- * prepare to handle failure.
- */
- ih_info.head = **ras_if;
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- /* request a gpu reset. will run again. */
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__GFX);
- return 0;
- }
- /* fail to enable ras, cleanup all. */
- goto irq;
- }
- /* enable successfully. continue. */
- goto resume;
- }
-
- *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
- if (!*ras_if)
- return -ENOMEM;
-
- **ras_if = ras_block;
-
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__GFX);
- r = 0;
- }
- goto feature;
- }
-
- ih_info.head = **ras_if;
- fs_info.head = **ras_if;
-
- r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
- if (r)
- goto interrupt;
-
- amdgpu_ras_debugfs_create(adev, &fs_info);
-
- r = amdgpu_ras_sysfs_create(adev, &fs_info);
- if (r)
- goto sysfs;
-resume:
- r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
- if (r)
- goto irq;
-
return 0;
-irq:
- amdgpu_ras_sysfs_remove(adev, *ras_if);
-sysfs:
- amdgpu_ras_debugfs_remove(adev, *ras_if);
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
-interrupt:
- amdgpu_ras_feature_enable(adev, *ras_if, 0);
-feature:
- kfree(*ras_if);
- *ras_if = NULL;
- return r;
}
static int gfx_v9_0_late_init(void *handle)
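
The gfx_v9_0 RAS plumbing is consolidated here: the duplicated late-init boilerplate (feature enable with the -EAGAIN reset-on-boot handling, debugfs/sysfs node creation, IRQ handler registration, plus the error-path unwinding) and the matching sw_fini teardown are folded into common helpers. What the file is left calling, as the hunks above show:

	/* sw_fini path: one call replaces the open-coded teardown */
	amdgpu_gfx_ras_fini(adev);

	/* ecc_late_init path: one call replaces enable + fs nodes + IRQ setup */
	r = amdgpu_gfx_ras_late_init(adev);
	if (r)
		return r;
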
@@ -4560,16 +4287,14 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
{
amdgpu_gfx_rlc_enter_safe_mode(adev);
- if (is_support_sw_smu(adev) && !enable)
- smu_set_gfx_cgpg(&adev->smu, enable);
-
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
} else {
gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
- gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
+ gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
}
amdgpu_gfx_rlc_exit_safe_mode(adev);
@@ -4838,8 +4563,6 @@ static int gfx_v9_0_set_powergating_state(void *handle,
gfx_v9_0_enable_cp_power_gating(adev, false);
/* update gfx cgpg state */
- if (is_support_sw_smu(adev) && enable)
- smu_set_gfx_cgpg(&adev->smu, enable);
gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
/* update mgcg state */
@@ -4970,7 +4693,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
@@ -4990,8 +4713,8 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
}
gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
- adev->nbio_funcs->get_hdp_flush_req_offset(adev),
- adev->nbio_funcs->get_hdp_flush_done_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
ref_and_mask, ref_and_mask, 0x20);
}
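
The long hunk that follows rebuilds the GFX EDC counter table: the anonymous struct with hand-written full-register masks gives way to the struct ras_gfx_subblock_reg introduced earlier in this diff, whose SEC/DED mask-and-shift pairs come from SOC15_REG_FIELD(). Assuming the usual <reg>__<field>_MASK / <reg>__<field>__SHIFT register headers, the macro boils down to a two-initializer expansion, and a counter readout becomes a mask-and-shift:

	#define SOC15_REG_FIELD(reg, field) \
		reg##__##field##_MASK, reg##__##field##__SHIFT

	u32 data = RREG32(reg);	/* the subblock's EDC counter register */
	u32 sec_count = (data & entry->sec_count_mask) >> entry->sec_count_shift;
	u32 ded_count = (data & entry->ded_count_mask) >> entry->ded_count_shift;
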
@@ -5723,313 +5446,446 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
- struct amdgpu_iv_entry *entry)
-{
- /* TODO ue will trigger an interrupt. */
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- if (adev->gfx.funcs->query_ras_error_count)
- adev->gfx.funcs->query_ras_error_count(adev, err_data);
- amdgpu_ras_reset_gpu(adev, 0);
- return AMDGPU_RAS_SUCCESS;
-}
-static const struct {
- const char *name;
- uint32_t ip;
- uint32_t inst;
- uint32_t seg;
- uint32_t reg_offset;
- uint32_t per_se_instance;
- int32_t num_instance;
- uint32_t sec_count_mask;
- uint32_t ded_count_mask;
-} gfx_ras_edc_regs[] = {
- { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1,
- REG_FIELD_MASK(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
- REG_FIELD_MASK(CPC_EDC_SCRATCH_CNT, DED_COUNT) },
- { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1,
- REG_FIELD_MASK(CPC_EDC_UCODE_CNT, SEC_COUNT),
- REG_FIELD_MASK(CPC_EDC_UCODE_CNT, DED_COUNT) },
- { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1,
- REG_FIELD_MASK(CPF_EDC_ROQ_CNT, COUNT_ME1), 0 },
- { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1,
- REG_FIELD_MASK(CPF_EDC_ROQ_CNT, COUNT_ME2), 0 },
- { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1,
- REG_FIELD_MASK(CPF_EDC_TAG_CNT, SEC_COUNT),
- REG_FIELD_MASK(CPF_EDC_TAG_CNT, DED_COUNT) },
- { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1,
- REG_FIELD_MASK(CPG_EDC_DMA_CNT, ROQ_COUNT), 0 },
- { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1,
- REG_FIELD_MASK(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
- REG_FIELD_MASK(CPG_EDC_DMA_CNT, TAG_DED_COUNT) },
- { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1,
- REG_FIELD_MASK(CPG_EDC_TAG_CNT, SEC_COUNT),
- REG_FIELD_MASK(CPG_EDC_TAG_CNT, DED_COUNT) },
- { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1,
- REG_FIELD_MASK(DC_EDC_CSINVOC_CNT, COUNT_ME1), 0 },
- { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1,
- REG_FIELD_MASK(DC_EDC_RESTORE_CNT, COUNT_ME1), 0 },
- { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1,
- REG_FIELD_MASK(DC_EDC_STATE_CNT, COUNT_ME1), 0 },
- { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_CNT, GDS_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_CNT, GDS_MEM_DED) },
- { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED), 0 },
+static const struct ras_gfx_subblock_reg ras_subblock_regs[] = {
+ { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
+ },
+ { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
+ },
+ { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
+ 0, 0
+ },
+ { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
+ 0, 0
+ },
+ { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
+ },
+ { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
+ SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
+ 0, 0
+ },
+ { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
+ SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
+ SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
+ },
+ { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
+ SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
+ },
+ { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
+ 0, 0
+ },
+ { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
+ 0, 0
+ },
+ { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
+ SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
+ 0, 0
+ },
+ { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
+ },
+ { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
+ 0, 0
+ },
{ "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
- 0, 1, REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) },
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
+ },
{ "GDS_OA_PHY_PHY_CMD_RAM_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
+ },
{ "GDS_OA_PHY_PHY_DATA_RAM_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED), 0 },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
+ 0, 0
+ },
{ "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
+ },
{ "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
+ },
{ "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
+ },
{ "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) },
- { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 1, 1,
- REG_FIELD_MASK(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT), 0 },
- { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
- REG_FIELD_MASK(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) },
- { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT), 0 },
- { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT), 0 },
- { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT), 0 },
- { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT), 0 },
- { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 2,
- REG_FIELD_MASK(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT), 0 },
- { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 2,
- REG_FIELD_MASK(TCA_EDC_CNT, REQ_FIFO_SED_COUNT), 0 },
- { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
- REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) },
- { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
- REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) },
- { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
- REG_FIELD_MASK(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) },
- { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
- REG_FIELD_MASK(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) },
- { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
- REG_FIELD_MASK(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) },
- { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT), 0 },
- { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT), 0 },
- { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT), 0 },
- { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, RETURN_DATA_SED_COUNT), 0 },
- { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT), 0 },
- { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT), 0 },
- { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT), 0 },
- { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT), 0 },
- { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0,
- 16, REG_FIELD_MASK(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT), 0 },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
+ },
+ { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
+ 0, 0
+ },
+ { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
+ },
+ { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
+ },
+ { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
+ },
+ { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
+ },
+ { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
+ },
+ { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
+ },
+ { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
+ 0, 0
+ },
{ "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
- 0, 16, REG_FIELD_MASK(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
- 0 },
- { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0,
- 16, REG_FIELD_MASK(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT), 0 },
+ SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
+ 0, 0
+ },
{ "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
- 0, 16, REG_FIELD_MASK(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
- 0 },
- { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0,
- 16, REG_FIELD_MASK(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT), 0 },
- { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 72,
- REG_FIELD_MASK(TCI_EDC_CNT, WRITE_RAM_SED_COUNT), 0 },
- { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) },
- { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) },
- { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT), 0 },
- { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT), 0 },
- { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0 },
- { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) },
- { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) },
- { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
- REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) },
- { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
- REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) },
- { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TD_EDC_CNT, CS_FIFO_SED_COUNT), 0 },
- { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, LDS_D_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, LDS_D_DED_COUNT) },
- { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, LDS_I_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, LDS_I_DED_COUNT) },
- { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, SGPR_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, SGPR_DED_COUNT) },
- { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR0_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR0_DED_COUNT) },
- { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR1_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR1_DED_COUNT) },
- { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR2_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR2_DED_COUNT) },
- { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR3_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR3_DED_COUNT) },
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
+ 0, 0
+ },
+ { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
+ SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
+ },
+ { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
+ },
+ { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
+ 0, 0
+ },
+ { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
+ },
+ { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
+ },
+ { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
+ },
+ { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
+ },
+ { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
+ },
+ { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
+ },
+ { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
+ },
+ { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
+ },
+ { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
+ },
+ { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
+ },
+ { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
+ },
{ "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
- 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) },
- { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) },
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
+ },
+ { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
+ },
{ "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
- 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) },
- { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) },
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
+ },
+ { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
+ },
{ "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
- 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) },
- { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) },
- { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) },
- { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) },
- { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) },
- { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) },
- { "SQC_INST_BANKA_UTCL1_MISS_FIFO",
- SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6,
- REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
- 0 },
- { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKA_DIRTY_BIT_RAM",
- SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6,
- REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT), 0 },
- { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6,
- REG_FIELD_MASK(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) },
- { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) },
- { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) },
- { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) },
- { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) },
- { "SQC_INST_BANKB_UTCL1_MISS_FIFO",
- SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, 6,
- REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
- 0 },
- { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKB_DIRTY_BIT_RAM",
- SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, 6,
- REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT), 0 },
- { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) },
- { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) },
- { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) },
- { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) },
- { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) },
- { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0 },
- { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0 },
- { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0 },
- { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0 },
- { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0 },
- { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) },
- { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) },
- { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) },
- { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0 },
- { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0 },
- { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT), 0 },
- { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT), 0 },
- { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT), 0 },
- { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT), 0 },
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
+ },
+ { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
+ },
+ { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
+ },
+ { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
+ },
+ { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
+ },
+ { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
+ },
+ { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
+ },
+ { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
+ },
+ { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
+ },
+ { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
+ },
+ { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
+ },
+ { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
+ },
+ { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
+ },
+ { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
+ },
+ { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
+ },
+ { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
+ },
+ { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0
+ },
+ { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
+ },
+ { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
+ },
+ { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
+ },
+ { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0
+ },
+ { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0
+ },
+ { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ 0, 0
+ },
+ { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ 0, 0
+ },
+ { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ 0, 0
+ },
+ { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ 0, 0
+ }
};
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
@@ -6078,14 +5934,217 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
return ret;
}
+static const char *vml2_mems[] = {
+ "UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_0_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_0_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_1_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_1_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_2_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_2_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_3_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_3_4K_MEM1",
+};
+
+static const char *vml2_walker_mems[] = {
+ "UTC_VML2_CACHE_PDE0_MEM0",
+ "UTC_VML2_CACHE_PDE0_MEM1",
+ "UTC_VML2_CACHE_PDE1_MEM0",
+ "UTC_VML2_CACHE_PDE1_MEM1",
+ "UTC_VML2_CACHE_PDE2_MEM0",
+ "UTC_VML2_CACHE_PDE2_MEM1",
+ "UTC_VML2_RDIF_LOG_FIFO",
+};
+
+static const char *atc_l2_cache_2m_mems[] = {
+ "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
+};
+
+static const char *atc_l2_cache_4k_mems[] = {
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
+};
+
+static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
+ struct ras_err_data *err_data)
+{
+ uint32_t i, data;
+ uint32_t sec_count, ded_count;
+
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
+
+ for (i = 0; i < 16; i++) {
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
+
+ sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ vml2_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ vml2_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < 7; i++) {
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
+
+ sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
+ SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ vml2_walker_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
+ DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ vml2_walker_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
+
+ sec_count = (data & 0x00006000L) >> 0xd;
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ atc_l2_cache_2m_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+ }
+
+ for (i = 0; i < 32; i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
+
+ sec_count = (data & 0x00006000L) >> 0xd;
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ atc_l2_cache_4k_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = (data & 0x00018000L) >> 0xf;
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ atc_l2_cache_4k_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+
+ return 0;
+}
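
[Editor's note] The query routine above relies on an indirect-index access pattern: each *_EDC_INDEX register selects one memory instance, and the matching *_EDC_CNT register then reports that instance's counters. A minimal sketch of that protocol; reg_read()/reg_write() and the offsets are hypothetical stand-ins for the RREG32_SOC15/WREG32_SOC15 macros:

    #include <stdint.h>

    #define EDC_INDEX 0x0100   /* hypothetical INDEX register offset */
    #define EDC_CNT   0x0104   /* hypothetical CNT register offset */

    extern uint32_t reg_read(uint32_t offset);
    extern void reg_write(uint32_t offset, uint32_t value);

    /* Select one memory instance, then latch its error counters. */
    static uint32_t read_edc_instance(uint32_t instance)
    {
        reg_write(EDC_INDEX, instance);
        return reg_read(EDC_CNT);
    }

    /* Mirror the reset sequence above: park the index at 255 and
     * write 0 to the counter register to clear stale counts. */
    static void reset_edc_counters(void)
    {
        reg_write(EDC_INDEX, 255);
        reg_write(EDC_CNT, 0);
    }
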
+
+static int __get_ras_error_count(const struct soc15_reg_entry *reg,
+ uint32_t se_id, uint32_t inst_id, uint32_t value,
+ uint32_t *sec_count, uint32_t *ded_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt, ded_cnt;
+
+ for (i = 0; i < ARRAY_SIZE(ras_subblock_regs); i++) {
+ if (ras_subblock_regs[i].reg_offset != reg->reg_offset ||
+ ras_subblock_regs[i].seg != reg->seg ||
+ ras_subblock_regs[i].inst != reg->inst)
+ continue;
+
+ sec_cnt = (value &
+ ras_subblock_regs[i].sec_count_mask) >>
+ ras_subblock_regs[i].sec_count_shift;
+ if (sec_cnt) {
+ DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+ ras_subblock_regs[i].name,
+ se_id, inst_id,
+ sec_cnt);
+ *sec_count += sec_cnt;
+ }
+
+ ded_cnt = (value &
+ ras_subblock_regs[i].ded_count_mask) >>
+ ras_subblock_regs[i].ded_count_shift;
+ if (ded_cnt) {
+ DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+ ras_subblock_regs[i].name,
+ se_id, inst_id,
+ ded_cnt);
+ *ded_count += ded_cnt;
+ }
+ }
+
+ return 0;
+}
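
[Editor's note] Both __get_ras_error_count() and the hardcoded ATC L2 reads above decode counters with the same mask-and-shift arithmetic. A self-contained example using the 0x00006000/0x00018000 literals from the ATC L2 loops, which place SEC in bits [14:13] and DED in bits [16:15]:

    #include <stdint.h>
    #include <stdio.h>

    #define SEC_MASK  0x00006000u  /* SEC count in bits [14:13] */
    #define SEC_SHIFT 13
    #define DED_MASK  0x00018000u  /* DED count in bits [16:15] */
    #define DED_SHIFT 15

    int main(void)
    {
        uint32_t value = 0x0000c000;  /* example raw counter value */
        uint32_t sec = (value & SEC_MASK) >> SEC_SHIFT;  /* -> 2 */
        uint32_t ded = (value & DED_MASK) >> DED_SHIFT;  /* -> 1 */
        printf("SEC %u, DED %u\n", sec, ded);
        return 0;
    }
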
+
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
- uint32_t sec_count, ded_count;
- uint32_t i;
+ uint32_t sec_count = 0, ded_count = 0;
+ uint32_t i, j, k;
uint32_t reg_value;
- uint32_t se_id, instance_id;
if (adev->asic_type != CHIP_VEGA20)
return -EINVAL;
@@ -6094,71 +6153,29 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
err_data->ce_count = 0;
mutex_lock(&adev->grbm_idx_mutex);
- for (se_id = 0; se_id < adev->gfx.config.max_shader_engines; se_id++) {
- for (instance_id = 0; instance_id < 256; instance_id++) {
- for (i = 0;
- i < sizeof(gfx_ras_edc_regs) / sizeof(gfx_ras_edc_regs[0]);
- i++) {
- if (se_id != 0 &&
- !gfx_ras_edc_regs[i].per_se_instance)
- continue;
- if (instance_id >= gfx_ras_edc_regs[i].num_instance)
- continue;
-
- gfx_v9_0_select_se_sh(adev, se_id, 0,
- instance_id);
-
- reg_value = RREG32(
- adev->reg_offset[gfx_ras_edc_regs[i].ip]
- [gfx_ras_edc_regs[i].inst]
- [gfx_ras_edc_regs[i].seg] +
- gfx_ras_edc_regs[i].reg_offset);
- sec_count = reg_value &
- gfx_ras_edc_regs[i].sec_count_mask;
- ded_count = reg_value &
- gfx_ras_edc_regs[i].ded_count_mask;
- if (sec_count) {
- DRM_INFO(
- "Instance[%d][%d]: SubBlock %s, SEC %d\n",
- se_id, instance_id,
- gfx_ras_edc_regs[i].name,
- sec_count);
- err_data->ce_count++;
- }
- if (ded_count) {
- DRM_INFO(
- "Instance[%d][%d]: SubBlock %s, DED %d\n",
- se_id, instance_id,
- gfx_ras_edc_regs[i].name,
- ded_count);
- err_data->ue_count++;
- }
+ for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
+ for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
+ for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
+ gfx_v9_0_select_se_sh(adev, j, 0, k);
+ reg_value =
+ RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
+ if (reg_value)
+ __get_ras_error_count(&sec_ded_counter_registers[i],
+ j, k, reg_value,
+ &sec_count, &ded_count);
}
}
}
- gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
-
- return 0;
-}
-static int gfx_v9_0_cp_ecc_error_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- struct ras_common_if *ras_if = adev->gfx.ras_if;
- struct ras_dispatch_if ih_data = {
- .entry = entry,
- };
+ err_data->ce_count += sec_count;
+ err_data->ue_count += ded_count;
- if (!ras_if)
- return 0;
+ gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
- ih_data.head = *ras_if;
+ gfx_v9_0_query_utc_edc_status(adev, err_data);
- DRM_ERROR("CP ECC ERROR IRQ\n");
- amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
@@ -6325,7 +6342,7 @@ static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
.set = gfx_v9_0_set_cp_ecc_error_state,
- .process = gfx_v9_0_cp_ecc_error_irq,
+ .process = amdgpu_gfx_cp_ecc_error_irq,
};
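
[Editor's note] This hunk replaces the per-IP CP ECC handler with a shared amdgpu_gfx_cp_ecc_error_irq(). The helper's body is not part of this diff; judging from the deleted gfx_v9_0_cp_ecc_error_irq() above, it presumably keeps the same shape. A sketch, not the actual implementation:

    static int cp_ecc_error_irq_sketch(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *source,
                                       struct amdgpu_iv_entry *entry)
    {
        struct ras_common_if *ras_if = adev->gfx.ras_if;
        struct ras_dispatch_if ih_data = { .entry = entry };

        if (!ras_if)
            return 0;       /* RAS not initialized for this block */

        ih_data.head = *ras_if;
        DRM_ERROR("CP ECC ERROR IRQ\n");
        amdgpu_ras_interrupt_dispatch(adev, &ih_data);
        return 0;
    }
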
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 6ce37ce77d14..9ec4297e61e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -178,6 +178,8 @@ static void gfxhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL, tmp);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index 8b789f750b72..b601c6740ef5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -46,21 +46,25 @@ u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev)
return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24;
}
-static void gfxhub_v2_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
{
- uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+ /* two registers' distance from mmGCVM_CONTEXT0_* to mmGCVM_CONTEXT1_* */
+ int offset = mmGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+ - mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ offset * vmid, lower_32_bits(page_table_base));
- WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- lower_32_bits(value));
-
- WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- upper_32_bits(value));
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ offset * vmid, upper_32_bits(page_table_base));
}
static void gfxhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
- gfxhub_v2_0_init_gart_pt_regs(adev);
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ gfxhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
@@ -166,6 +170,8 @@ static void gfxhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL, tmp);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
index 06807940748b..392b8cd94fc0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
@@ -31,5 +31,7 @@ void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value);
void gfxhub_v2_0_init(struct amdgpu_device *adev);
u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev);
+void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
#endif
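
[Editor's note] The new setup_vm_pt_regs() helpers parameterize the page-table-base write by VMID, exploiting the fixed register stride between the CONTEXT0 and CONTEXT1 copies (two registers, per the comment in the hunk). A standalone model of that addressing, with invented offsets:

    #include <assert.h>
    #include <stdint.h>

    #define CTX0_PT_BASE_LO 0x0800  /* hypothetical dword offset */
    #define CTX1_PT_BASE_LO 0x0802  /* CONTEXT1 copy, two regs later */

    static uint32_t ctx_pt_base_lo(uint32_t vmid)
    {
        uint32_t stride = CTX1_PT_BASE_LO - CTX0_PT_BASE_LO;  /* 2 */
        return CTX0_PT_BASE_LO + stride * vmid;
    }

    int main(void)
    {
        assert(ctx_pt_base_lo(0) == 0x0800);  /* GART uses vmid 0 */
        assert(ctx_pt_base_lo(3) == 0x0806);
        return 0;
    }
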
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 241a4e57cf4a..fb48622c2abd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -278,7 +278,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
int r;
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
mutex_lock(&adev->mman.gtt_window_lock);
@@ -397,43 +397,23 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
* 1 system
* 0 valid
*/
-static uint64_t gmc_v10_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- switch (flags & AMDGPU_VM_MTYPE_MASK) {
+static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
+{
+ switch (flags) {
case AMDGPU_VM_MTYPE_DEFAULT:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
case AMDGPU_VM_MTYPE_NC:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
case AMDGPU_VM_MTYPE_WC:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
case AMDGPU_VM_MTYPE_CC:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
case AMDGPU_VM_MTYPE_UC:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
default:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
}
-
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
}
static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
@@ -460,12 +440,32 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
}
+static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+
+ *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
+ *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+
+ if (mapping->flags & AMDGPU_PTE_PRT) {
+ *flags |= AMDGPU_PTE_PRT;
+ *flags |= AMDGPU_PTE_SNOOPED;
+ *flags |= AMDGPU_PTE_LOG;
+ *flags |= AMDGPU_PTE_SYSTEM;
+ *flags &= ~AMDGPU_PTE_VALID;
+ }
+}
+
static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
- .get_vm_pte_flags = gmc_v10_0_get_vm_pte_flags,
- .get_vm_pde = gmc_v10_0_get_vm_pde
+ .map_mtype = gmc_v10_0_map_mtype,
+ .get_vm_pde = gmc_v10_0_get_vm_pde,
+ .get_vm_pte = gmc_v10_0_get_vm_pte
};
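
[Editor's note] The old get_vm_pte_flags() mixed permission translation with mtype selection; the new map_mtype() hook answers only the mtype question. A toy version of the dispatch, with invented PTE encodings standing in for AMDGPU_PTE_MTYPE_NV10():

    #include <stdint.h>

    enum mtype_req { MT_DEFAULT, MT_NC, MT_WC, MT_CC, MT_UC };

    static uint64_t map_mtype_sketch(enum mtype_req req)
    {
        switch (req) {
        case MT_WC: return 1ull << 48;  /* hypothetical encodings */
        case MT_CC: return 2ull << 48;
        case MT_UC: return 3ull << 48;
        case MT_DEFAULT:
        case MT_NC:
        default:    return 0;           /* NC doubles as the fallback */
        }
    }
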
static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -519,8 +519,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
{
u64 base = 0;
- if (!amdgpu_sriov_vf(adev))
- base = gfxhub_v2_0_get_fb_location(adev);
+ base = gfxhub_v2_0_get_fb_location(adev);
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
amdgpu_gmc_gart_location(adev, mc);
@@ -540,24 +539,13 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
- int chansize, numchan;
-
- if (!amdgpu_emu_mode)
- adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
- else {
- /* hard code vram_width for emulation */
- chansize = 128;
- numchan = 1;
- adev->gmc.vram_width = numchan * chansize;
- }
-
/* Could aper size report 0 ? */
adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
/* size in MB on si */
adev->gmc.mc_vram_size =
- adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+ adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
adev->gmc.visible_vram_size = adev->gmc.aper_size;
@@ -636,7 +624,7 @@ static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
static int gmc_v10_0_sw_init(void *handle)
{
- int r;
+ int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfxhub_v2_0_init(adev);
@@ -644,7 +632,15 @@ static int gmc_v10_0_sw_init(void *handle)
spin_lock_init(&adev->gmc.invalidate_lock);
- adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
+ r = amdgpu_atomfirmware_get_vram_info(adev,
+ &vram_width, &vram_type, &vram_vendor);
+ if (!amdgpu_emu_mode)
+ adev->gmc.vram_width = vram_width;
+ else
+ adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
+
+ adev->gmc.vram_type = vram_type;
+ adev->gmc.vram_vendor = vram_vendor;
switch (adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
@@ -794,7 +790,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
/* Flush HDP after it is initialized */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 9fb1765e92d1..b205039350b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -386,27 +386,20 @@ static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
return pd_addr;
}
-static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
-}
-
static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
+static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags &= ~AMDGPU_PTE_PRT;
+}
+
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
@@ -1153,7 +1146,7 @@ static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
.set_prt = gmc_v6_0_set_prt,
.get_vm_pde = gmc_v6_0_get_vm_pde,
- .get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
+ .get_vm_pte = gmc_v6_0_get_vm_pte,
};
static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 0c3d9bc3a641..f08e5330642d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -463,27 +463,20 @@ static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}
-static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
-}
-
static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
+static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags &= ~AMDGPU_PTE_PRT;
+}
+
/**
* gmc_v8_0_set_fault_enable_default - update VM fault handling
*
@@ -1343,8 +1336,8 @@ static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
.set_prt = gmc_v7_0_set_prt,
- .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
- .get_vm_pde = gmc_v7_0_get_vm_pde
+ .get_vm_pde = gmc_v7_0_get_vm_pde,
+ .get_vm_pte = gmc_v7_0_get_vm_pte
};
static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index ea764dd9245d..6d96d40fbcb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -686,29 +686,21 @@ static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
* 0 valid
*/
-static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
-}
-
static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
+static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ *flags &= ~AMDGPU_PTE_PRT;
+}
+
/**
* gmc_v8_0_set_fault_enable_default - update VM fault handling
*
@@ -1711,8 +1703,8 @@ static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
.set_prt = gmc_v8_0_set_prt,
- .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
- .get_vm_pde = gmc_v8_0_get_vm_pde
+ .get_vm_pde = gmc_v8_0_get_vm_pde,
+ .get_vm_pte = gmc_v8_0_get_vm_pte
};
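
[Editor's note] The get_vm_pte() callbacks introduced for gmc v6/v7/v8 (and the v9/v10 variants earlier in this diff) all sanitize flags the same way: clear the bits this ASIC controls, then merge back only what the mapping legitimately requests. A reduced sketch using the common amdgpu bit positions, treated here as illustrative:

    #include <stdint.h>

    #define PTE_EXECUTABLE (1ull << 4)   /* stands in for AMDGPU_PTE_EXECUTABLE */
    #define PTE_PRT        (1ull << 51)  /* stands in for AMDGPU_PTE_PRT */

    /* v8-style: executable may be re-granted, PRT never is. */
    static void get_vm_pte_sketch(uint64_t mapping_flags, uint64_t *flags)
    {
        *flags &= ~PTE_EXECUTABLE;
        *flags |= mapping_flags & PTE_EXECUTABLE;
        *flags &= ~PTE_PRT;
    }
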
static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index f91337030dc0..9f2a893871ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -51,10 +51,12 @@
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
#include "umc_v6_1.h"
+#include "umc_v6_0.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "amdgpu_ras.h"
+#include "amdgpu_xgmi.h"
/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
@@ -243,44 +245,6 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
return 0;
}
-static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
- struct amdgpu_iv_entry *entry)
-{
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- if (adev->umc.funcs->query_ras_error_count)
- adev->umc.funcs->query_ras_error_count(adev, err_data);
- /* umc query_ras_error_address is also responsible for clearing
- * error status
- */
- if (adev->umc.funcs->query_ras_error_address)
- adev->umc.funcs->query_ras_error_address(adev, err_data);
-
- /* only uncorrectable error needs gpu reset */
- if (err_data->ue_count)
- amdgpu_ras_reset_gpu(adev, 0);
-
- return AMDGPU_RAS_SUCCESS;
-}
-
-static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
- struct ras_dispatch_if ih_data = {
- .entry = entry,
- };
-
- if (!ras_if)
- return 0;
-
- ih_data.head = *ras_if;
-
- amdgpu_ras_interrupt_dispatch(adev, &ih_data);
- return 0;
-}
-
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -355,6 +319,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
}
/* If it's the first fault for this address, process it normally */
+ if (retry_fault && !in_interrupt() &&
+ amdgpu_vm_handle_fault(adev, entry->pasid, addr))
+ return 1; /* This also prevents sending it to KFD */
+
if (!amdgpu_sriov_vf(adev)) {
/*
* Issue a dummy read to wait for the status register to
@@ -417,7 +385,7 @@ static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
.set = gmc_v9_0_ecc_interrupt_state,
- .process = gmc_v9_0_process_ecc_irq,
+ .process = amdgpu_umc_process_ecc_irq,
};
static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
@@ -584,44 +552,25 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
* 0 valid
*/
-static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
+static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
-
- switch (flags & AMDGPU_VM_MTYPE_MASK) {
+ switch (flags) {
case AMDGPU_VM_MTYPE_DEFAULT:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
case AMDGPU_VM_MTYPE_NC:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
case AMDGPU_VM_MTYPE_WC:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
+ case AMDGPU_VM_MTYPE_RW:
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
case AMDGPU_VM_MTYPE_CC:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
case AMDGPU_VM_MTYPE_UC:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
default:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
}
-
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
}
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
@@ -648,12 +597,34 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
}
+static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+
+ *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
+ *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
+
+ if (mapping->flags & AMDGPU_PTE_PRT) {
+ *flags |= AMDGPU_PTE_PRT;
+ *flags &= ~AMDGPU_PTE_VALID;
+ }
+
+ if (adev->asic_type == CHIP_ARCTURUS &&
+ !(*flags & AMDGPU_PTE_SYSTEM) &&
+ mapping->bo_va->is_xgmi)
+ *flags |= AMDGPU_PTE_SNOOPED;
+}
+
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
- .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
- .get_vm_pde = gmc_v9_0_get_vm_pde
+ .map_mtype = gmc_v9_0_map_mtype,
+ .get_vm_pde = gmc_v9_0_get_vm_pde,
+ .get_vm_pte = gmc_v9_0_get_vm_pte
};
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -664,6 +635,9 @@ static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ adev->umc.funcs = &umc_v6_0_funcs;
+ break;
case CHIP_VEGA20:
adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
@@ -681,7 +655,7 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_VEGA20:
- adev->mmhub_funcs = &mmhub_v1_0_funcs;
+ adev->mmhub.funcs = &mmhub_v1_0_funcs;
break;
default:
break;
@@ -762,140 +736,10 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
return 0;
}
-static int gmc_v9_0_ecc_ras_block_late_init(void *handle,
- struct ras_fs_if *fs_info, struct ras_common_if *ras_block)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ras_common_if **ras_if = NULL;
- struct ras_ih_if ih_info = {
- .cb = gmc_v9_0_process_ras_data_cb,
- };
- int r;
-
- if (ras_block->block == AMDGPU_RAS_BLOCK__UMC)
- ras_if = &adev->gmc.umc_ras_if;
- else if (ras_block->block == AMDGPU_RAS_BLOCK__MMHUB)
- ras_if = &adev->gmc.mmhub_ras_if;
- else
- BUG();
-
- if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
- amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
- return 0;
- }
-
- /* handle resume path. */
- if (*ras_if) {
- /* resend ras TA enable cmd during resume.
- * prepare to handle failure.
- */
- ih_info.head = **ras_if;
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- /* request a gpu reset. will run again. */
- amdgpu_ras_request_reset_on_boot(adev,
- ras_block->block);
- return 0;
- }
- /* fail to enable ras, cleanup all. */
- goto irq;
- }
- /* enable successfully. continue. */
- goto resume;
- }
-
- *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
- if (!*ras_if)
- return -ENOMEM;
-
- **ras_if = *ras_block;
-
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- amdgpu_ras_request_reset_on_boot(adev,
- ras_block->block);
- r = 0;
- }
- goto feature;
- }
-
- ih_info.head = **ras_if;
- fs_info->head = **ras_if;
-
- if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) {
- r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
- if (r)
- goto interrupt;
- }
-
- amdgpu_ras_debugfs_create(adev, fs_info);
-
- r = amdgpu_ras_sysfs_create(adev, fs_info);
- if (r)
- goto sysfs;
-resume:
- if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) {
- r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
- if (r)
- goto irq;
- }
-
- return 0;
-irq:
- amdgpu_ras_sysfs_remove(adev, *ras_if);
-sysfs:
- amdgpu_ras_debugfs_remove(adev, *ras_if);
- if (ras_block->block == AMDGPU_RAS_BLOCK__UMC)
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
-interrupt:
- amdgpu_ras_feature_enable(adev, *ras_if, 0);
-feature:
- kfree(*ras_if);
- *ras_if = NULL;
- return r;
-}
-
-static int gmc_v9_0_ecc_late_init(void *handle)
-{
- int r;
-
- struct ras_fs_if umc_fs_info = {
- .sysfs_name = "umc_err_count",
- .debugfs_name = "umc_err_inject",
- };
- struct ras_common_if umc_ras_block = {
- .block = AMDGPU_RAS_BLOCK__UMC,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .sub_block_index = 0,
- .name = "umc",
- };
- struct ras_fs_if mmhub_fs_info = {
- .sysfs_name = "mmhub_err_count",
- .debugfs_name = "mmhub_err_inject",
- };
- struct ras_common_if mmhub_ras_block = {
- .block = AMDGPU_RAS_BLOCK__MMHUB,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .sub_block_index = 0,
- .name = "mmhub",
- };
-
- r = gmc_v9_0_ecc_ras_block_late_init(handle,
- &umc_fs_info, &umc_ras_block);
- if (r)
- return r;
-
- r = gmc_v9_0_ecc_ras_block_late_init(handle,
- &mmhub_fs_info, &mmhub_ras_block);
- return r;
-}
-
static int gmc_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool r;
+ int r;
if (!gmc_v9_0_keep_stolen_memory(adev))
amdgpu_bo_late_init(adev);
@@ -929,7 +773,7 @@ static int gmc_v9_0_late_init(void *handle)
}
}
- r = gmc_v9_0_ecc_late_init(handle);
+ r = amdgpu_gmc_ras_late_init(adev);
if (r)
return r;
@@ -970,33 +814,11 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
- int chansize, numchan;
int r;
- if (amdgpu_sriov_vf(adev)) {
- /* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN,
- * and DF related registers is not readable, seems hardcord is the
- * only way to set the correct vram_width
- */
- adev->gmc.vram_width = 2048;
- } else if (amdgpu_emu_mode != 1) {
- adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
- }
-
- if (!adev->gmc.vram_width) {
- /* hbm memory channel size */
- if (adev->flags & AMD_IS_APU)
- chansize = 64;
- else
- chansize = 128;
-
- numchan = adev->df_funcs->get_hbm_channel_number(adev);
- adev->gmc.vram_width = numchan * chansize;
- }
-
/* size in MB on si */
adev->gmc.mc_vram_size =
- adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+ adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
if (!(adev->flags & AMD_IS_APU)) {
@@ -1108,7 +930,7 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
static int gmc_v9_0_sw_init(void *handle)
{
- int r;
+ int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfxhub_v1_0_init(adev);
@@ -1119,7 +941,32 @@ static int gmc_v9_0_sw_init(void *handle)
spin_lock_init(&adev->gmc.invalidate_lock);
- adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
+ r = amdgpu_atomfirmware_get_vram_info(adev,
+ &vram_width, &vram_type, &vram_vendor);
+ if (amdgpu_sriov_vf(adev))
+ /* For Vega10 SR-IOV, vram_width can't be read from ATOM (as on RAVEN),
+ * and the DF-related registers are not readable; hardcoding seems to be
+ * the only way to set the correct vram_width
+ */
+ adev->gmc.vram_width = 2048;
+ else if (amdgpu_emu_mode != 1)
+ adev->gmc.vram_width = vram_width;
+
+ if (!adev->gmc.vram_width) {
+ int chansize, numchan;
+
+ /* hbm memory channel size */
+ if (adev->flags & AMD_IS_APU)
+ chansize = 64;
+ else
+ chansize = 128;
+
+ numchan = adev->df_funcs->get_hbm_channel_number(adev);
+ adev->gmc.vram_width = numchan * chansize;
+ }
+
+ adev->gmc.vram_type = vram_type;
+ adev->gmc.vram_vendor = vram_vendor;
switch (adev->asic_type) {
case CHIP_RAVEN:
adev->num_vmhubs = 2;
@@ -1240,33 +1087,7 @@ static int gmc_v9_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
void *stolen_vga_buf;
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
- adev->gmc.umc_ras_if) {
- struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
- struct ras_ih_if ih_info = {
- .head = *ras_if,
- };
-
- /* remove fs first */
- amdgpu_ras_debugfs_remove(adev, ras_if);
- amdgpu_ras_sysfs_remove(adev, ras_if);
- /* remove the IH */
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
- amdgpu_ras_feature_enable(adev, ras_if, 0);
- kfree(ras_if);
- }
-
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
- adev->gmc.mmhub_ras_if) {
- struct ras_common_if *ras_if = adev->gmc.mmhub_ras_if;
-
- /* remove fs and disable ras feature */
- amdgpu_ras_debugfs_remove(adev, ras_if);
- amdgpu_ras_sysfs_remove(adev, ras_if);
- amdgpu_ras_feature_enable(adev, ras_if, 0);
- kfree(ras_if);
- }
-
+ amdgpu_gmc_ras_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
@@ -1316,13 +1137,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
*/
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
- int r, i;
- bool value;
- u32 tmp;
-
- amdgpu_device_program_register_sequence(adev,
- golden_settings_vega10_hdp,
- ARRAY_SIZE(golden_settings_vega10_hdp));
+ int r;
if (adev->gart.bo == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -1332,15 +1147,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- /* TODO for renoir */
- mmhub_v1_0_update_power_gating(adev, true);
- break;
- default:
- break;
- }
-
r = gfxhub_v1_0_gart_enable(adev);
if (r)
return r;
@@ -1352,6 +1158,49 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(adev->gmc.gart_size >> 20),
+ (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
+ adev->gart.ready = true;
+ return 0;
+}
+
+static int gmc_v9_0_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool value;
+ int r, i;
+ u32 tmp;
+
+ /* The sequence of these two function calls matters. */
+ gmc_v9_0_init_golden_registers(adev);
+
+ if (adev->mode_info.num_crtc) {
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ /* Lock out access through the VGA aperture */
+ WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+
+ /* disable VGA render */
+ WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+ }
+ }
+
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_vega10_hdp,
+ ARRAY_SIZE(golden_settings_vega10_hdp));
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ /* TODO for renoir */
+ mmhub_v1_0_update_power_gating(adev, true);
+ break;
+ case CHIP_ARCTURUS:
+ WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
+ break;
+ default:
+ break;
+ }
+
WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
@@ -1361,7 +1210,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
/* After HDP is initialized, flush HDP.*/
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;
@@ -1377,28 +1226,8 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
for (i = 0; i < adev->num_vmhubs; ++i)
gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->gmc.gart_size >> 20),
- (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
- adev->gart.ready = true;
- return 0;
-}
-
-static int gmc_v9_0_hw_init(void *handle)
-{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- /* The sequence of these two function calls matters.*/
- gmc_v9_0_init_golden_registers(adev);
-
- if (adev->mode_info.num_crtc) {
- /* Lockout access through VGA aperture*/
- WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
-
- /* disable VGA render */
- WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- }
+ if (adev->umc.funcs && adev->umc.funcs->init_registers)
+ adev->umc.funcs->init_registers(adev);
r = gmc_v9_0_gart_enable(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 04cd4b6f95d4..6965e1e6fa9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -206,6 +206,8 @@ static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp);
}
@@ -616,5 +618,6 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
}
const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
+ .ras_late_init = amdgpu_mmhub_ras_late_init,
.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index 3542c203c3c8..2eea702de8ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -31,20 +31,25 @@
#include "soc15_common.h"
-static void mmhub_v2_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
{
- uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+ /* two registers' distance from mmMMVM_CONTEXT0_* to mmMMVM_CONTEXT1_* */
+ int offset = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+ - mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
- WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- lower_32_bits(value));
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ offset * vmid, lower_32_bits(page_table_base));
- WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- upper_32_bits(value));
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ offset * vmid, upper_32_bits(page_table_base));
}
static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
- mmhub_v2_0_init_gart_pt_regs(adev);
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ mmhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
@@ -152,6 +157,8 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
index db16f3ece218..3ea4344f0315 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
@@ -31,5 +31,7 @@ void mmhub_v2_0_init(struct amdgpu_device *adev);
int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state);
void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
+void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 0cf7ef44b4b5..657970f9ebfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -240,6 +240,8 @@ static void mmhub_v9_4_enable_system_domain(struct amdgpu_device *adev,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}
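
[Editor's note] All of the RETRY_PERMISSION_OR_INVALID_PAGE_FAULT hunks above use the same read-modify-write idiom that REG_SET_FIELD() implements. A generic sketch of that idiom with an invented single-bit field:

    #include <stdint.h>

    #define FIELD_SHIFT 7
    #define FIELD_MASK  (1u << FIELD_SHIFT)  /* hypothetical field */

    static uint32_t set_field(uint32_t reg, uint32_t val)
    {
        reg &= ~FIELD_MASK;                        /* clear old value */
        reg |= (val << FIELD_SHIFT) & FIELD_MASK;  /* insert new one */
        return reg;
    }

    /* Usage mirrors the hunks: tmp = read(); tmp = set_field(tmp, 0);
     * write(tmp); -- only the targeted bits change. */
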
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
new file mode 100644
index 000000000000..0d8767eb7a70
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -0,0 +1,380 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "nbio/nbio_2_3_offset.h"
+#include "nbio/nbio_2_3_sh_mask.h"
+#include "gc/gc_10_1_0_offset.h"
+#include "gc/gc_10_1_0_sh_mask.h"
+#include "soc15.h"
+#include "navi10_ih.h"
+#include "soc15_common.h"
+#include "mxgpu_nv.h"
+#include "mxgpu_ai.h"
+
+static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
+{
+ WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
+}
+
+static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
+{
+ WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
+}
+
+/*
+ * this peek_msg may *only* be called from an IRQ routine, because in that
+ * context the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has already
+ * been set to 1 by the host.
+ *
+ * if called outside an IRQ routine, peek_msg cannot be guaranteed to return
+ * the correct value, since RCV_DW0 only holds a valid message while
+ * RCV_MSG_VALID is set by the host.
+ */
+static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
+{
+ return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+}
+
+
+static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
+ enum idh_event event)
+{
+ u32 reg;
+
+ reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+ if (reg != event)
+ return -ENOENT;
+
+ xgpu_nv_mailbox_send_ack(adev);
+
+ return 0;
+}
+
+static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
+{
+ return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
+}
+
+static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
+{
+ int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
+ u8 reg;
+
+ do {
+ reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
+ if (reg & 2)
+ return 0;
+
+ mdelay(5);
+ timeout -= 5;
+ } while (timeout > 1);
+
+ pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);
+
+ return -ETIME;
+}
+
+static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
+{
+ int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT;
+
+ do {
+ r = xgpu_nv_mailbox_rcv_msg(adev, event);
+ if (!r)
+ return 0;
+
+ msleep(10);
+ timeout -= 10;
+ } while (timeout > 1);
+
+ pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
+
+ return -ETIME;
+}
+
+static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
+ enum idh_request req, u32 data1, u32 data2, u32 data3)
+{
+ u32 reg;
+ int r;
+ uint8_t trn;
+
+ /* IMPORTANT:
+ * clear TRN_MSG_VALID valid to clear host's RCV_MSG_ACK
+ * and with host's RCV_MSG_ACK cleared hw automatically clear host's RCV_MSG_ACK
+ * which lead to VF's TRN_MSG_ACK cleared, otherwise below xgpu_nv_poll_ack()
+ * will return immediatly
+ */
+ do {
+ xgpu_nv_mailbox_set_valid(adev, false);
+ trn = xgpu_nv_peek_ack(adev);
+ if (trn) {
+ pr_err("trn=%x ACK should not assert! wait again !\n", trn);
+ msleep(1);
+ }
+ } while (trn);
+
+ reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0));
+ reg = REG_SET_FIELD(reg, BIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0,
+ MSGBUF_DATA, req);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0),
+ reg);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW1),
+ data1);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW2),
+ data2);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW3),
+ data3);
+
+ xgpu_nv_mailbox_set_valid(adev, true);
+
+ /* start to poll ack */
+ r = xgpu_nv_poll_ack(adev);
+ if (r)
+ pr_err("Doesn't get ack from pf, continue\n");
+
+ xgpu_nv_mailbox_set_valid(adev, false);
+}
+
+static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
+ enum idh_request req)
+{
+ int r;
+
+ xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
+
+ /* poll for the response msg if the request expects one (init/fini/reset access) */
+ if (req == IDH_REQ_GPU_INIT_ACCESS ||
+ req == IDH_REQ_GPU_FINI_ACCESS ||
+ req == IDH_REQ_GPU_RESET_ACCESS) {
+ r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
+ if (r) {
+ pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
+ return r;
+ }
+ /* Retrieve checksum from mailbox2 */
+ if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
+ adev->virt.fw_reserve.checksum_key =
+ RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW2));
+ }
+ }
+
+ return 0;
+}
+
+static int xgpu_nv_request_reset(struct amdgpu_device *adev)
+{
+ return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
+}
+
+static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
+ bool init)
+{
+ enum idh_request req;
+
+ req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
+ return xgpu_nv_send_access_requests(adev, req);
+}
+
+static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
+ bool init)
+{
+ enum idh_request req;
+ int r = 0;
+
+ req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
+ r = xgpu_nv_send_access_requests(adev, req);
+
+ return r;
+}
+
+static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_DEBUG("get ack intr and do nothing.\n");
+ return 0;
+}
+
+static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+
+ tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, ACK_INT_EN,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+
+ return 0;
+}
+
+static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+ int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
+ int locked;
+
+ /* block amdgpu_gpu_recover till msg FLR COMPLETE is received,
+ * otherwise the mailbox msg will be ruined/reset by
+ * the VF FLR.
+ *
+ * we can unlock lock_reset to allow "amdgpu_job_timedout"
+ * to run gpu_recover() after FLR_NOTIFICATION_CMPL is received,
+ * which means the host side has finished this VF's FLR.
+ */
+ locked = mutex_trylock(&adev->lock_reset);
+ if (locked)
+ adev->in_gpu_reset = 1;
+
+ do {
+ if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
+ goto flr_done;
+
+ msleep(10);
+ timeout -= 10;
+ } while (timeout > 1);
+
+flr_done:
+ if (locked) {
+ adev->in_gpu_reset = 0;
+ mutex_unlock(&adev->lock_reset);
+ }
+
+ /* Trigger recovery for world switch failure if no TDR */
+ if (amdgpu_device_should_recover_gpu(adev))
+ amdgpu_device_gpu_recover(adev, NULL);
+}
+
+static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+
+ tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, VALID_INT_EN,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+
+ return 0;
+}
+
+static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);
+
+ switch (event) {
+ case IDH_FLR_NOTIFICATION:
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.flr_work);
+ break;
+ /* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
+ * safely ignore it here since the polling thread will handle it;
+ * other messages such as FLR complete are not handled here either.
+ */
+ case IDH_CLR_MSG_BUF:
+ case IDH_FLR_NOTIFICATION_CMPL:
+ case IDH_READY_TO_ACCESS_GPU:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
+ .set = xgpu_nv_set_mailbox_ack_irq,
+ .process = xgpu_nv_mailbox_ack_irq,
+};
+
+static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
+ .set = xgpu_nv_set_mailbox_rcv_irq,
+ .process = xgpu_nv_mailbox_rcv_irq,
+};
+
+void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->virt.ack_irq.num_types = 1;
+ adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
+ adev->virt.rcv_irq.num_types = 1;
+ adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
+}
+
+int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+ return r;
+ }
+
+ return 0;
+}
+
+int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
+ if (r)
+ return r;
+ r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+ return r;
+ }
+
+ INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
+
+ return 0;
+}
+
+void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
+{
+ amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+}
+
+const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
+ .req_full_gpu = xgpu_nv_request_full_gpu_access,
+ .rel_full_gpu = xgpu_nv_release_full_gpu_access,
+ .reset_gpu = xgpu_nv_request_reset,
+ .wait_reset = NULL,
+ .trans_msg = xgpu_nv_mailbox_trans_msg,
+};
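
The ops table above is consumed through the generic amdgpu_virt layer rather than called directly. Below is a minimal sketch of the call flow it implies during VF bring-up; the wrapper name hypothetical_vf_init is invented for illustration, while the req_full_gpu/rel_full_gpu hooks come from the xgpu_nv_virt_ops table in this patch.

    /* Sketch only: illustrates the ordering implied by xgpu_nv_virt_ops.
     * "hypothetical_vf_init" is not part of this patch.
     */
    static int hypothetical_vf_init(struct amdgpu_device *adev)
    {
            int r;

            /* ask the host (PF) for exclusive access before touching hw */
            r = adev->virt.ops->req_full_gpu(adev, true);
            if (r)
                    return r;

            /* ... hardware init runs under full access here ... */

            /* hand the GPU back to the host once init is done */
            return adev->virt.ops->rel_full_gpu(adev, true);
    }
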
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
new file mode 100644
index 000000000000..99b15f6865cb
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MXGPU_NV_H__
+#define __MXGPU_NV_H__
+
+#define NV_MAILBOX_POLL_ACK_TIMEDOUT 500
+#define NV_MAILBOX_POLL_MSG_TIMEDOUT 12000
+#define NV_MAILBOX_POLL_FLR_TIMEDOUT 500
+
+extern const struct amdgpu_virt_ops xgpu_nv_virt_ops;
+
+void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev);
+int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev);
+int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev);
+void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev);
+
+#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4)
+#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4 + 1)
+
+#endif
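
The two NV_MAIBOX_CONTROL_*_OFFSET_BYTE macros above convert the dword-indexed offset returned by SOC15_REG_OFFSET() into byte addresses for the 8-bit WREG8/RREG8 accessors: scaling by 4 gives the byte offset of the 32-bit MAILBOX_CONTROL register, byte 0 of which holds the TRN control bits and byte 1 the RCV control bits. A minimal sketch of the arithmetic, with an assumed dword offset purely for illustration:

    /* Sketch only: the dword offset value 0x1234 is made up. */
    unsigned int reg_dw  = 0x1234;          /* 32-bit register index    */
    unsigned int trn_off = reg_dw * 4;      /* byte 0: TRN control byte */
    unsigned int rcv_off = reg_dw * 4 + 1;  /* byte 1: RCV control byte */
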
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 9fe08408db58..9af73567e716 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -117,7 +117,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
/* disable irqs */
navi10_ih_disable_interrupts(adev);
- adev->nbio_funcs->ih_control(adev);
+ adev->nbio.funcs->ih_control(adev);
/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
@@ -162,7 +162,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
}
WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
- adev->nbio_funcs->ih_doorbell_range(adev, ih->use_doorbell,
+ adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
ih->doorbell_index);
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
index a56c93620e78..88efaecf9f70 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
@@ -24,7 +24,6 @@
#include "nv.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "navi10_ip_offset.h"
int navi10_reg_base_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c
index cadc7603ca41..a786d159e5e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c
@@ -24,7 +24,6 @@
#include "nv.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "navi12_ip_offset.h"
int navi12_reg_base_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c
index 3b5f0f65e096..4ea1e8fbb601 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c
@@ -24,7 +24,6 @@
#include "nv.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "navi14_ip_offset.h"
int navi14_reg_base_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index c05d78d4efc6..f3a3fe746222 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -27,11 +27,21 @@
#include "nbio/nbio_2_3_default.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
#define smnPCIE_CONFIG_CNTL 0x11180044
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
+
+static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+ WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
+
static u32 nbio_v2_3_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
@@ -56,10 +66,9 @@ static void nbio_v2_3_hdp_flush(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+ WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
- amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
- NBIO, 0, mmBIF_BX_PF_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
+ amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
static u32 nbio_v2_3_get_memsize(struct amdgpu_device *adev)
@@ -311,7 +320,6 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
}
const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
- .hdp_flush_reg = &nbio_v2_3_hdp_flush_reg,
.get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v2_3_get_pcie_index_offset,
@@ -331,4 +339,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.ih_control = nbio_v2_3_ih_control,
.init_registers = nbio_v2_3_init_registers,
.detect_hw_virt = nbio_v2_3_detect_hw_virt,
+ .remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
index 5ae52085f6b7..a43b60acf7f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
@@ -26,6 +26,7 @@
#include "soc15_common.h"
+extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v2_3_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 6590143c3f75..635d9e1fc0a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -226,7 +226,7 @@ static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev)
return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}
-static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
+const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -277,7 +277,6 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
}
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
- .hdp_flush_reg = &nbio_v6_1_hdp_flush_reg,
.get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
index 0743a6f016f3..6dc743b73218 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
@@ -26,6 +26,7 @@
#include "soc15_common.h"
+extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v6_1_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index 74eecb768a82..d6cbf26074bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -292,7 +292,6 @@ static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
}
const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
- .hdp_flush_reg = &nbio_v7_0_hdp_flush_reg,
.get_hdp_flush_req_offset = nbio_v7_0_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_0_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v7_0_get_pcie_index_offset,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
index 508d549c5029..e7aefb252550 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
@@ -26,6 +26,7 @@
#include "soc15_common.h"
+extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v7_0_funcs;
#endif
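
The extern declarations added to nbio_v2_3.h, nbio_v6_1.h, nbio_v7_0.h and nbio_v7_4.h all serve the same refactor: the per-ASIC hdp_flush_reg table is dropped from the funcs vtable and becomes a standalone member of adev->nbio. The resulting consumer-side pattern, as converted in the sdma hunks later in this patch, looks like this (shown here only as a sketch of the new layout):

    /* New access pattern: the flush-reg table hangs off adev->nbio
     * directly instead of off the funcs pointer.
     */
    const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
    u32 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
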
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 910fffced43b..0db458f9fafc 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -23,10 +23,12 @@
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_4.h"
+#include "amdgpu_ras.h"
#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "nbio/nbio_7_4_0_smn.h"
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
@@ -266,7 +268,7 @@ static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev)
return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}
-static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
+const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -306,17 +308,208 @@ static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
- uint32_t def, data;
- def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
- data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);
+}
- if (def != data)
- WREG32_PCIE(smnPCIE_CI_CNTL, data);
+static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
+{
+ uint32_t bif_doorbell_intr_cntl;
+
+ bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+ if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL,
+ RAS_CNTLR_INTERRUPT_CLEAR, 1);
+ WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ amdgpu_ras_global_ras_isr(adev);
+ }
+}
+
+static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
+{
+ uint32_t bif_doorbell_intr_cntl;
+
+ bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+ if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
+ WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ amdgpu_ras_global_ras_isr(adev);
+ }
+}
+
+
+static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ /* The ras_controller_irq enablement should be done in the psp
+ * bootloader when it tries to enable the ras feature. The driver only
+ * needs to set the correct interrupt vector for the bare-metal and
+ * sriov use cases respectively.
+ */
+ uint32_t bif_intr_cntl;
+
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+ if (state == AMDGPU_IRQ_STATE_ENABLE) {
+ /* set the interrupt vector select bit to 0 to select
+ * vector 1 for the bare-metal case */
+ bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
+ BIF_INTR_CNTL,
+ RAS_INTR_VEC_SEL, 0);
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+ }
+
+ return 0;
+}
+
+static int nbio_v7_4_process_ras_controller_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ /* By design, the ih cookie for ras_controller_irq should be written
+ * to the BIF ring instead of the general iv ring. However, due to a
+ * known BIF ring hw bug, it has to be disabled. There is no chance
+ * the process function will be invoked, so it is left as a dummy one.
+ */
+ return 0;
+}
+
+static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ /* The ras_err_event_athub_irq enablement should be done in the psp
+ * bootloader when it tries to enable the ras feature. The driver only
+ * needs to set the correct interrupt vector for the bare-metal and
+ * sriov use cases respectively.
+ */
+ uint32_t bif_intr_cntl;
+
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+ if (state == AMDGPU_IRQ_STATE_ENABLE) {
+ /* set the interrupt vector select bit to 0 to select
+ * vector 1 for the bare-metal case */
+ bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
+ BIF_INTR_CNTL,
+ RAS_INTR_VEC_SEL, 0);
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+ }
+
+ return 0;
+}
+
+static int nbio_v7_4_process_err_event_athub_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ /* By design, the ih cookie for err_event_athub_irq should be written
+ * to the BIF ring instead of the general iv ring. However, due to a
+ * known BIF ring hw bug, it has to be disabled. There is no chance
+ * the process function will be invoked, so it is left as a dummy one.
+ */
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_controller_irq_funcs = {
+ .set = nbio_v7_4_set_ras_controller_irq_state,
+ .process = nbio_v7_4_process_ras_controller_irq,
+};
+
+static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_err_event_athub_irq_funcs = {
+ .set = nbio_v7_4_set_ras_err_event_athub_irq_state,
+ .process = nbio_v7_4_process_err_event_athub_irq,
+};
+
+static int nbio_v7_4_init_ras_controller_interrupt(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_controller_irq.funcs =
+ &nbio_v7_4_ras_controller_irq_funcs;
+ adev->nbio.ras_controller_irq.num_types = 1;
+
+ /* register ras controller interrupt */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
+ &adev->nbio.ras_controller_irq);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int nbio_v7_4_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_err_event_athub_irq.funcs =
+ &nbio_v7_4_ras_err_event_athub_irq_funcs;
+ adev->nbio.ras_err_event_athub_irq.num_types = 1;
+
+ /* register ras err event athub interrupt */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
+ &adev->nbio.ras_err_event_athub_irq);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ uint32_t global_sts, central_sts, int_eoi;
+ uint32_t corr, fatal, non_fatal;
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO);
+ corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr);
+ fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal);
+ non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO,
+ ParityErrNonFatal);
+
+ if (corr)
+ err_data->ce_count++;
+ if (fatal)
+ err_data->ue_count++;
+
+ if (corr || fatal || non_fatal) {
+ central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS);
+ /* clear error status register */
+ WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);
+
+ if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS,
+ BIFL_RasContller_Intr_Recv)) {
+ /* clear interrupt status register */
+ WREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS, central_sts);
+ int_eoi = RREG32_PCIE(smnIOHC_INTERRUPT_EOI);
+ int_eoi = REG_SET_FIELD(int_eoi,
+ IOHC_INTERRUPT_EOI, SMI_EOI, 1);
+ WREG32_PCIE(smnIOHC_INTERRUPT_EOI, int_eoi);
+ }
+ }
+}
+
+static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
+ bool enable)
+{
+ WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
+ DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
- .hdp_flush_reg = &nbio_v7_4_hdp_flush_reg,
.get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
@@ -330,6 +523,7 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture,
.enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture,
.ih_doorbell_range = nbio_v7_4_ih_doorbell_range,
+ .enable_doorbell_interrupt = nbio_v7_4_enable_doorbell_interrupt,
.update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating,
.update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep,
.get_clockgating_state = nbio_v7_4_get_clockgating_state,
@@ -337,4 +531,10 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.init_registers = nbio_v7_4_init_registers,
.detect_hw_virt = nbio_v7_4_detect_hw_virt,
.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
+ .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
+ .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
+ .init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
+ .init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
+ .query_ras_error_count = nbio_v7_4_query_ras_error_count,
+ .ras_late_init = amdgpu_nbio_ras_late_init,
};
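
The RAS handlers added above share one register idiom: read the control register, test a status field with REG_GET_FIELD(), then acknowledge it by setting the matching write-1-to-clear field with REG_SET_FIELD() and writing the value back. A condensed sketch of that idiom, using the register and field names from this patch (the helper itself is illustrative only, not an additional in-tree function):

    /* Sketch of the read/test/write-1-to-clear idiom used by the
     * nbio_v7_4 RAS handlers.
     */
    static void example_ack_ras_intr(struct amdgpu_device *adev)
    {
            u32 v = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);

            if (REG_GET_FIELD(v, BIF_DOORBELL_INT_CNTL,
                              RAS_CNTLR_INTERRUPT_STATUS)) {
                    v = REG_SET_FIELD(v, BIF_DOORBELL_INT_CNTL,
                                      RAS_CNTLR_INTERRUPT_CLEAR, 1);
                    WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, v);
            }
    }
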
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
index c442865bac4f..b1ac82872752 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
@@ -26,6 +26,7 @@
#include "soc15_common.h"
+extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index de9b995b65b1..46206a1a1f4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -46,6 +46,7 @@
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
+#include "nbio_v2_3.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
@@ -53,6 +54,7 @@
#include "vcn_v2_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
+#include "mxgpu_nv.h"
static const struct amd_ip_funcs nv_common_ip_funcs;
@@ -63,8 +65,8 @@ static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u32 r;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -78,8 +80,8 @@ static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -119,7 +121,7 @@ static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
- return adev->nbio_funcs->get_memsize(adev);
+ return adev->nbio.funcs->get_memsize(adev);
}
static u32 nv_get_xclk(struct amdgpu_device *adev)
@@ -279,7 +281,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- u32 memsize = adev->nbio_funcs->get_memsize(adev);
+ u32 memsize = adev->nbio.funcs->get_memsize(adev);
if (memsize != 0xffffffff)
break;
@@ -368,8 +370,8 @@ static void nv_program_aspm(struct amdgpu_device *adev)
static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
bool enable)
{
- adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
- adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
+ adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
static const struct amdgpu_ip_block_version nv_common_ip_block =
@@ -423,9 +425,13 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
if (r)
return r;
- adev->nbio_funcs = &nbio_v2_3_funcs;
+ adev->nbio.funcs = &nbio_v2_3_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
- adev->nbio_funcs->detect_hw_virt(adev);
+ adev->nbio.funcs->detect_hw_virt(adev);
+
+ if (amdgpu_sriov_vf(adev))
+ adev->virt.ops = &xgpu_nv_virt_ops;
switch (adev->asic_type) {
case CHIP_NAVI10:
@@ -435,7 +441,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev))
+ is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -446,7 +452,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- is_support_sw_smu(adev))
+ is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
if (adev->enable_mes)
@@ -458,7 +464,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev))
+ is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -469,7 +475,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- is_support_sw_smu(adev))
+ is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
break;
@@ -482,12 +488,12 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
- return adev->nbio_funcs->get_rev_id(adev);
+ return adev->nbio.funcs->get_rev_id(adev);
}
static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
- adev->nbio_funcs->hdp_flush(adev, ring);
+ adev->nbio.funcs->hdp_flush(adev, ring);
}
static void nv_invalidate_hdp(struct amdgpu_device *adev,
@@ -583,8 +589,11 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
static int nv_common_early_init(void *handle)
{
+#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
+ adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
adev->pcie_rreg = &nv_pcie_rreg;
@@ -667,16 +676,31 @@ static int nv_common_early_init(void *handle)
return -EINVAL;
}
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_init_setting(adev);
+ xgpu_nv_mailbox_set_irq_funcs(adev);
+ }
+
return 0;
}
static int nv_common_late_init(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ xgpu_nv_mailbox_get_irq(adev);
+
return 0;
}
static int nv_common_sw_init(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ xgpu_nv_mailbox_add_irq_id(adev);
+
return 0;
}
@@ -694,7 +718,13 @@ static int nv_common_hw_init(void *handle)
/* enable aspm */
nv_program_aspm(adev);
/* setup nbio registers */
- adev->nbio_funcs->init_registers(adev);
+ adev->nbio.funcs->init_registers(adev);
+ /* remap HDP registers to a hole in mmio space,
+ * for the purpose of exposing those registers
+ * to process space
+ */
+ if (adev->nbio.funcs->remap_hdp_registers)
+ adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
nv_enable_doorbell_aperture(adev, true);
@@ -856,9 +886,9 @@ static int nv_common_set_clockgating_state(void *handle,
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
- adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+ adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+ adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
nv_update_hdp_mem_power_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
@@ -886,7 +916,7 @@ static void nv_common_get_clockgating_state(void *handle, u32 *flags)
if (amdgpu_sriov_vf(adev))
*flags = 0;
- adev->nbio_funcs->get_clockgating_state(adev, flags);
+ adev->nbio.funcs->get_clockgating_state(adev, flags);
/* AMD_CG_SUPPORT_HDP_MGCG */
tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 5d95e614369a..b345e69ba246 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -40,6 +40,9 @@
MODULE_FIRMWARE("amdgpu/raven_asd.bin");
MODULE_FIRMWARE("amdgpu/picasso_asd.bin");
MODULE_FIRMWARE("amdgpu/raven2_asd.bin");
+MODULE_FIRMWARE("amdgpu/picasso_ta.bin");
+MODULE_FIRMWARE("amdgpu/raven2_ta.bin");
+MODULE_FIRMWARE("amdgpu/raven_ta.bin");
static int psp_v10_0_init_microcode(struct psp_context *psp)
{
@@ -48,7 +51,7 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
char fw_name[30];
int err = 0;
const struct psp_firmware_header_v1_0 *hdr;
-
+ const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
switch (adev->asic_type) {
@@ -79,7 +82,45 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
adev->psp.asd_start_addr = (uint8_t *)hdr +
le32_to_cpu(hdr->header.ucode_array_offset_bytes);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
+ err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
+ if (err) {
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+ dev_info(adev->dev,
+ "psp v10.0: Failed to load firmware \"%s\"\n",
+ fw_name);
+ } else {
+ err = amdgpu_ucode_validate(adev->psp.ta_fw);
+ if (err)
+ goto out2;
+
+ ta_hdr = (const struct ta_firmware_header_v1_0 *)
+ adev->psp.ta_fw->data;
+ adev->psp.ta_hdcp_ucode_version =
+ le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
+ adev->psp.ta_hdcp_ucode_size =
+ le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
+ adev->psp.ta_hdcp_start_addr =
+ (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
+
+ adev->psp.ta_dtm_ucode_version =
+ le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
+ adev->psp.ta_dtm_ucode_size =
+ le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
+ adev->psp.ta_dtm_start_addr =
+ (uint8_t *)adev->psp.ta_hdcp_start_addr +
+ le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
+ }
+
return 0;
+
+out2:
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
out:
if (err) {
dev_err(adev->dev,
@@ -228,6 +269,7 @@ static int psp_v10_0_cmd_submit(struct psp_context *psp,
write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
write_frame->fence_value = index;
+ amdgpu_asic_flush_hdp(adev, NULL);
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
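
The TA-firmware path added above relies on one parsing idiom: every offset and size field of the firmware header is little-endian on disk, so each is converted with le32_to_cpu() before use, and the dtm image is located relative to the hdcp image rather than to the start of the file. A short sketch of that layout math, using the structure names from the patch (illustrative only):

    /* Sketch: locating the TA images inside the loaded blob. */
    const struct ta_firmware_header_v1_0 *ta_hdr =
            (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
    uint8_t *blob = (uint8_t *)ta_hdr;

    /* hdcp image starts at the generic ucode array offset */
    uint8_t *hdcp = blob + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
    /* dtm image is placed relative to the hdcp image */
    uint8_t *dtm  = hdcp + le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
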
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 10166104b8a3..ace6e6c5629c 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -49,6 +49,7 @@ MODULE_FIRMWARE("amdgpu/navi12_sos.bin");
MODULE_FIRMWARE("amdgpu/navi12_asd.bin");
MODULE_FIRMWARE("amdgpu/arcturus_sos.bin");
MODULE_FIRMWARE("amdgpu/arcturus_asd.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
@@ -57,6 +58,8 @@ MODULE_FIRMWARE("amdgpu/arcturus_asd.bin");
#define mmRLC_GPM_UCODE_DATA_NV10 0x5b62
#define mmSDMA0_UCODE_ADDR_NV10 0x5880
#define mmSDMA0_UCODE_DATA_NV10 0x5881
+/* memory training timeout define */
+#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
static int psp_v11_0_init_microcode(struct psp_context *psp)
{
@@ -155,6 +158,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
switch (adev->asic_type) {
case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
if (err) {
@@ -182,7 +186,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
- case CHIP_ARCTURUS:
break;
default:
BUG();
@@ -205,18 +208,26 @@ out:
return err;
}
+static bool psp_v11_0_is_sos_alive(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t sol_reg;
+
+ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+
+ return sol_reg != 0x0;
+}
+
static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
{
int ret;
uint32_t psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
- uint32_t sol_reg;
/* Check sOS sign of life register to confirm sys driver and sOS
* have already been loaded.
*/
- sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg) {
+ if (psp_v11_0_is_sos_alive(psp)) {
psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
@@ -252,13 +263,11 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
int ret;
uint32_t psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
- uint32_t sol_reg;
/* Check sOS sign of life register to confirm sys driver and sOS
* have already been loaded.
*/
- sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg) {
+ if (psp_v11_0_is_sos_alive(psp)) {
psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
@@ -296,13 +305,11 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
int ret;
unsigned int psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
- uint32_t sol_reg;
/* Check sOS sign of life register to confirm sys driver and sOS
* have already been loaded.
*/
- sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg)
+ if (psp_v11_0_is_sos_alive(psp))
return 0;
/* Wait for bootloader to signify that it is ready by setting bit 31 of C2PMSG_35 to 1 */
@@ -398,6 +405,34 @@ static bool psp_v11_0_support_vmr_ring(struct psp_context *psp)
return false;
}
+static int psp_v11_0_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* Write the ring destroy command*/
+ if (psp_v11_0_support_vmr_ring(psp))
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+ else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
+ GFX_CTRL_CMD_ID_DESTROY_RINGS);
+
+ /* there might be a handshake issue with hardware which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) */
+ if (psp_v11_0_support_vmr_ring(psp))
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ else
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+
+ return ret;
+}
+
static int psp_v11_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -407,6 +442,12 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
struct amdgpu_device *adev = psp->adev;
if (psp_v11_0_support_vmr_ring(psp)) {
+ ret = psp_v11_0_ring_stop(psp, ring_type);
+ if (ret) {
+ DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n");
+ return ret;
+ }
+
/* Write low address of the ring to C2PMSG_102 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
@@ -451,33 +492,6 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
return ret;
}
-static int psp_v11_0_ring_stop(struct psp_context *psp,
- enum psp_ring_type ring_type)
-{
- int ret = 0;
- struct amdgpu_device *adev = psp->adev;
-
- /* Write the ring destroy command*/
- if (psp_v11_0_support_vmr_ring(psp))
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
- GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
- else
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
- GFX_CTRL_CMD_ID_DESTROY_RINGS);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) */
- if (psp_v11_0_support_vmr_ring(psp))
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
- else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
-
- return ret;
-}
static int psp_v11_0_ring_destroy(struct psp_context *psp,
enum psp_ring_type ring_type)
@@ -541,6 +555,7 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
write_frame->fence_value = index;
+ amdgpu_asic_flush_hdp(adev, NULL);
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
@@ -889,6 +904,162 @@ static int psp_v11_0_rlc_autoload_start(struct psp_context *psp)
return psp_rlc_autoload_start(psp);
}
+static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
+{
+ int ret;
+ int i;
+ uint32_t data_32;
+ int max_wait;
+ struct amdgpu_device *adev = psp->adev;
+
+ data_32 = (psp->mem_train_ctx.c2p_train_data_offset >> 20);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, data_32);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, msg);
+
+ max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
+ for (i = 0; i < max_wait; i++) {
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret == 0)
+ break;
+ }
+ if (i < max_wait)
+ ret = 0;
+ else
+ ret = -ETIME;
+
+ DRM_DEBUG("training %s %s, cost %d @ %d ms\n",
+ (msg == PSP_BL__DRAM_SHORT_TRAIN) ? "short" : "long",
+ (ret == 0) ? "succeed" : "failed",
+ i, adev->usec_timeout/1000);
+ return ret;
+}
+
+static void psp_v11_0_memory_training_fini(struct psp_context *psp)
+{
+ struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
+
+ ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
+ kfree(ctx->sys_cache);
+ ctx->sys_cache = NULL;
+}
+
+static int psp_v11_0_memory_training_init(struct psp_context *psp)
+{
+ int ret;
+ struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
+
+ if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
+ DRM_DEBUG("memory training is not supported!\n");
+ return 0;
+ }
+
+ ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
+ if (ctx->sys_cache == NULL) {
+ DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
+ ret = -ENOMEM;
+ goto Err_out;
+ }
+
+ DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
+ ctx->train_data_size,
+ ctx->p2c_train_data_offset,
+ ctx->c2p_train_data_offset);
+ ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
+ return 0;
+
+Err_out:
+ psp_v11_0_memory_training_fini(psp);
+ return ret;
+}
+
+/*
+ * save and restore process
+ */
+static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
+{
+ int ret;
+ uint32_t p2c_header[4];
+ struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
+ uint32_t *pcache = (uint32_t*)ctx->sys_cache;
+
+ if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) {
+ DRM_DEBUG("Memory training is not supported.\n");
+ return 0;
+ } else if (ctx->init != PSP_MEM_TRAIN_INIT_SUCCESS) {
+ DRM_ERROR("Memory training initialization failure.\n");
+ return -EINVAL;
+ }
+
+ if (psp_v11_0_is_sos_alive(psp)) {
+ DRM_DEBUG("SOS is alive, skip memory training.\n");
+ return 0;
+ }
+
+ amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false);
+ DRM_DEBUG("sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n",
+ pcache[0], pcache[1], pcache[2], pcache[3],
+ p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]);
+
+ if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
+ DRM_DEBUG("Short training depends on restore.\n");
+ ops |= PSP_MEM_TRAIN_RESTORE;
+ }
+
+ if ((ops & PSP_MEM_TRAIN_RESTORE) &&
+ pcache[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
+ DRM_DEBUG("sys_cache[0] is invalid, restore depends on save.\n");
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ if (p2c_header[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
+ !(pcache[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
+ pcache[3] == p2c_header[3])) {
+ DRM_DEBUG("sys_cache is invalid or out-of-date, need save training data to sys_cache.\n");
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ if ((ops & PSP_MEM_TRAIN_SAVE) &&
+ p2c_header[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
+ DRM_DEBUG("p2c_header[0] is invalid, save depends on long training.\n");
+ ops |= PSP_MEM_TRAIN_SEND_LONG_MSG;
+ }
+
+ if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
+ ops &= ~PSP_MEM_TRAIN_SEND_SHORT_MSG;
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ DRM_DEBUG("Memory training ops:%x.\n", ops);
+
+ if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
+ ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
+ if (ret) {
+ DRM_ERROR("Send long training msg failed.\n");
+ return ret;
+ }
+ }
+
+ if (ops & PSP_MEM_TRAIN_SAVE) {
+ amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, ctx->sys_cache, ctx->train_data_size, false);
+ }
+
+ if (ops & PSP_MEM_TRAIN_RESTORE) {
+ amdgpu_device_vram_access(psp->adev, ctx->c2p_train_data_offset, ctx->sys_cache, ctx->train_data_size, true);
+ }
+
+ if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
+ ret = psp_v11_0_memory_training_send_msg(psp, (amdgpu_force_long_training > 0) ?
+ PSP_BL__DRAM_LONG_TRAIN : PSP_BL__DRAM_SHORT_TRAIN);
+ if (ret) {
+ DRM_ERROR("send training msg failed.\n");
+ return ret;
+ }
+ }
+ ctx->training_cnt++;
+ return 0;
+}
+
static const struct psp_funcs psp_v11_0_funcs = {
.init_microcode = psp_v11_0_init_microcode,
.bootloader_load_kdb = psp_v11_0_bootloader_load_kdb,
@@ -909,6 +1080,9 @@ static const struct psp_funcs psp_v11_0_funcs = {
.ras_trigger_error = psp_v11_0_ras_trigger_error,
.ras_cure_posion = psp_v11_0_ras_cure_posion,
.rlc_autoload_start = psp_v11_0_rlc_autoload_start,
+ .mem_training_init = psp_v11_0_memory_training_init,
+ .mem_training_fini = psp_v11_0_memory_training_fini,
+ .mem_training = psp_v11_0_memory_training,
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
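
The ops-widening logic in psp_v11_0_memory_training() is easiest to follow with a concrete case. A worked sketch, assuming a cold boot where neither the system cache nor the VRAM copy carries MEM_TRAIN_SYSTEM_SIGNATURE (the starting ops value is the common caller request):

    /* Worked example of the dependency cascade; values illustrative. */
    uint32_t ops = PSP_MEM_TRAIN_SEND_SHORT_MSG;

    ops |= PSP_MEM_TRAIN_RESTORE;        /* short training needs restored data */
    ops |= PSP_MEM_TRAIN_SAVE;           /* sys_cache invalid: restore needs a save */
    ops |= PSP_MEM_TRAIN_SEND_LONG_MSG;  /* VRAM copy invalid: save needs long training */

    /* long training supersedes the short message */
    ops &= ~PSP_MEM_TRAIN_SEND_SHORT_MSG;
    ops |= PSP_MEM_TRAIN_SAVE;
    /* final ops: LONG_TRAIN | SAVE | RESTORE */
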
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index c72e43f8e0be..8f553f6f92d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -378,6 +378,7 @@ static int psp_v12_0_cmd_submit(struct psp_context *psp,
write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
write_frame->fence_value = index;
+ amdgpu_asic_flush_hdp(adev, NULL);
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index d2c727f6a8bd..fdc00938327b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -454,6 +454,7 @@ static int psp_v3_1_cmd_submit(struct psp_context *psp,
write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
write_frame->fence_value = index;
+ amdgpu_asic_flush_hdp(adev, NULL);
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 78452cf0115d..4fb9f1929809 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -746,13 +746,13 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask = 0;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
sdma_v4_0_wait_reg_mem(ring, 0, 1,
- adev->nbio_funcs->get_hdp_flush_done_offset(adev),
- adev->nbio_funcs->get_hdp_flush_req_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
ref_and_mask, ref_and_mask, 10);
}
@@ -1690,102 +1690,17 @@ static int sdma_v4_0_early_init(void *handle)
}
static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
+ void *err_data,
struct amdgpu_iv_entry *entry);
static int sdma_v4_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ras_common_if **ras_if = &adev->sdma.ras_if;
struct ras_ih_if ih_info = {
.cb = sdma_v4_0_process_ras_data_cb,
};
- struct ras_fs_if fs_info = {
- .sysfs_name = "sdma_err_count",
- .debugfs_name = "sdma_err_inject",
- };
- struct ras_common_if ras_block = {
- .block = AMDGPU_RAS_BLOCK__SDMA,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .sub_block_index = 0,
- .name = "sdma",
- };
- int r, i;
-
- if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
- amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
- return 0;
- }
-
- /* handle resume path. */
- if (*ras_if) {
- /* resend ras TA enable cmd during resume.
- * prepare to handle failure.
- */
- ih_info.head = **ras_if;
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- /* request a gpu reset. will run again. */
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__SDMA);
- return 0;
- }
- /* fail to enable ras, cleanup all. */
- goto irq;
- }
- /* enable successfully. continue. */
- goto resume;
- }
-
- *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
- if (!*ras_if)
- return -ENOMEM;
-
- **ras_if = ras_block;
-
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__SDMA);
- r = 0;
- }
- goto feature;
- }
- ih_info.head = **ras_if;
- fs_info.head = **ras_if;
-
- r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
- if (r)
- goto interrupt;
-
- amdgpu_ras_debugfs_create(adev, &fs_info);
-
- r = amdgpu_ras_sysfs_create(adev, &fs_info);
- if (r)
- goto sysfs;
-resume:
- for (i = 0; i < adev->sdma.num_instances; i++) {
- r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
- AMDGPU_SDMA_IRQ_INSTANCE0 + i);
- if (r)
- goto irq;
- }
-
- return 0;
-irq:
- amdgpu_ras_sysfs_remove(adev, *ras_if);
-sysfs:
- amdgpu_ras_debugfs_remove(adev, *ras_if);
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
-interrupt:
- amdgpu_ras_feature_enable(adev, *ras_if, 0);
-feature:
- kfree(*ras_if);
- *ras_if = NULL;
- return r;
+ return amdgpu_sdma_ras_late_init(adev, &ih_info);
}
static int sdma_v4_0_sw_init(void *handle)
@@ -1857,21 +1772,7 @@ static int sdma_v4_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
- adev->sdma.ras_if) {
- struct ras_common_if *ras_if = adev->sdma.ras_if;
- struct ras_ih_if ih_info = {
- .head = *ras_if,
- };
-
- /*remove fs first*/
- amdgpu_ras_debugfs_remove(adev, ras_if);
- amdgpu_ras_sysfs_remove(adev, ras_if);
- /*remove the IH*/
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
- amdgpu_ras_feature_enable(adev, ras_if, 0);
- kfree(ras_if);
- }
+ amdgpu_sdma_ras_fini(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
@@ -1891,7 +1792,7 @@ static int sdma_v4_0_hw_init(void *handle)
if ((adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->set_powergating_by_smu) ||
- adev->asic_type == CHIP_RENOIR)
+ (adev->asic_type == CHIP_RENOIR && !adev->in_gpu_reset))
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
if (!amdgpu_sriov_vf(adev))
@@ -2024,52 +1925,28 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
}
static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
+ void *err_data,
struct amdgpu_iv_entry *entry)
{
- uint32_t err_source;
int instance;
+ /* When "Full RAS" is enabled, the per-IP interrupt sources should
+ * be disabled and the driver should only look for the aggregated
+ * interrupt via sync flood
+ */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ goto out;
+
instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
if (instance < 0)
- return 0;
-
- switch (entry->src_id) {
- case SDMA0_4_0__SRCID__SDMA_SRAM_ECC:
- err_source = 0;
- break;
- case SDMA0_4_0__SRCID__SDMA_ECC:
- err_source = 1;
- break;
- default:
- return 0;
- }
-
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ goto out;
- amdgpu_ras_reset_gpu(adev, 0);
+ amdgpu_sdma_process_ras_data_cb(adev, err_data, entry);
+out:
return AMDGPU_RAS_SUCCESS;
}
-static int sdma_v4_0_process_ecc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- struct ras_common_if *ras_if = adev->sdma.ras_if;
- struct ras_dispatch_if ih_data = {
- .entry = entry,
- };
-
- if (!ras_if)
- return 0;
-
- ih_data.head = *ras_if;
-
- amdgpu_ras_interrupt_dispatch(adev, &ih_data);
- return 0;
-}
-
static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -2417,7 +2294,7 @@ static const struct amdgpu_irq_src_funcs sdma_v4_0_illegal_inst_irq_funcs = {
static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = {
.set = sdma_v4_0_set_ecc_irq_state,
- .process = sdma_v4_0_process_ecc_irq,
+ .process = amdgpu_sdma_process_ecc_irq,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index fa2f70ce2e2b..b8fdb192f6d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -406,7 +406,7 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask = 0;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->me == 0)
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
@@ -416,8 +416,8 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
- amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
- amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
@@ -683,7 +683,7 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
- adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+ adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
ring->doorbell_index, 20);
if (amdgpu_sriov_vf(adev))
@@ -1129,7 +1129,7 @@ static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
- amdgpu_ring_write(ring, 0xfffffff); /* mask */
+ amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index f8ab80c8801b..9be0168217f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -58,6 +58,9 @@
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
+#include "nbio_v6_1.h"
+#include "nbio_v7_0.h"
+#include "nbio_v7_4.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
@@ -91,8 +94,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u32 r;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -106,8 +109,8 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -121,8 +124,8 @@ static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u64 r;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
/* read low 32 bit */
@@ -142,8 +145,8 @@ static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
/* write low 32 bit */
@@ -262,7 +265,7 @@ static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
- return adev->nbio_funcs->get_memsize(adev);
+ return adev->nbio.funcs->get_memsize(adev);
}
static u32 soc15_get_xclk(struct amdgpu_device *adev)
@@ -461,7 +464,7 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- u32 memsize = adev->nbio_funcs->get_memsize(adev);
+ u32 memsize = adev->nbio.funcs->get_memsize(adev);
if (memsize != 0xffffffff)
break;
@@ -475,42 +478,66 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
{
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ if (is_support_sw_smu(adev)) {
+ struct smu_context *smu = &adev->smu;
- if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
- *cap = false;
- return -ENOENT;
- }
+ *cap = smu_baco_is_support(smu);
+ return 0;
+ } else {
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
+ *cap = false;
+ return -ENOENT;
+ }
- return pp_funcs->get_asic_baco_capability(pp_handle, cap);
+ return pp_funcs->get_asic_baco_capability(pp_handle, cap);
+ }
}
static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
- return -ENOENT;
+ /* avoid NBIF getting stuck when doing RAS recovery in BACO reset */
+ if (ras && ras->supported)
+ adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
- /* enter BACO state */
- if (pp_funcs->set_asic_baco_state(pp_handle, 1))
- return -EIO;
+ dev_info(adev->dev, "GPU BACO reset\n");
- /* exit BACO state */
- if (pp_funcs->set_asic_baco_state(pp_handle, 0))
- return -EIO;
+ if (is_support_sw_smu(adev)) {
+ struct smu_context *smu = &adev->smu;
- dev_info(adev->dev, "GPU BACO reset\n");
+ if (smu_baco_reset(smu))
+ return -EIO;
+ } else {
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->get_asic_baco_state || !pp_funcs->set_asic_baco_state)
+ return -ENOENT;
+
+ /* enter BACO state */
+ if (pp_funcs->set_asic_baco_state(pp_handle, 1))
+ return -EIO;
+
+ /* exit BACO state */
+ if (pp_funcs->set_asic_baco_state(pp_handle, 0))
+ return -EIO;
+ }
- adev->in_baco_reset = 1;
+ /* re-enable doorbell interrupt after BACO exit */
+ if (ras && ras->supported)
+ adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
return 0;
}
static int soc15_mode2_reset(struct amdgpu_device *adev)
{
+ if (is_support_sw_smu(adev))
+ return smu_mode2_reset(&adev->smu);
if (!adev->powerplay.pp_funcs ||
!adev->powerplay.pp_funcs->asic_reset_mode_2)
return -ENOENT;
@@ -525,6 +552,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_RAVEN:
+ case CHIP_RENOIR:
return AMD_RESET_METHOD_MODE2;
case CHIP_VEGA10:
case CHIP_VEGA12:
@@ -626,8 +654,8 @@ static void soc15_program_aspm(struct amdgpu_device *adev)
static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
bool enable)
{
- adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
- adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
+ adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
static const struct amdgpu_ip_block_version vega10_common_ip_block =
@@ -641,7 +669,7 @@ static const struct amdgpu_ip_block_version vega10_common_ip_block =
static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
- return adev->nbio_funcs->get_rev_id(adev);
+ return adev->nbio.funcs->get_rev_id(adev);
}
int soc15_set_ip_blocks(struct amdgpu_device *adev)
@@ -667,13 +695,17 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
adev->gmc.xgmi.supported = true;
- if (adev->flags & AMD_IS_APU)
- adev->nbio_funcs = &nbio_v7_0_funcs;
- else if (adev->asic_type == CHIP_VEGA20 ||
- adev->asic_type == CHIP_ARCTURUS)
- adev->nbio_funcs = &nbio_v7_4_funcs;
- else
- adev->nbio_funcs = &nbio_v6_1_funcs;
+ if (adev->flags & AMD_IS_APU) {
+ adev->nbio.funcs = &nbio_v7_0_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
+ } else if (adev->asic_type == CHIP_VEGA20 ||
+ adev->asic_type == CHIP_ARCTURUS) {
+ adev->nbio.funcs = &nbio_v7_4_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
+ } else {
+ adev->nbio.funcs = &nbio_v6_1_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
+ }
if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
adev->df_funcs = &df_v3_6_funcs;
@@ -681,7 +713,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
adev->df_funcs = &df_v1_7_funcs;
adev->rev_id = soc15_get_rev_id(adev);
- adev->nbio_funcs->detect_hw_virt(adev);
+ adev->nbio.funcs->detect_hw_virt(adev);
if (amdgpu_sriov_vf(adev))
adev->virt.ops = &xgpu_ai_virt_ops;
@@ -750,13 +782,26 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_ARCTURUS:
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ } else {
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ }
+
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+
+ if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT))
+ amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
break;
case CHIP_RENOIR:
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
@@ -785,7 +830,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
- adev->nbio_funcs->hdp_flush(adev, ring);
+ adev->nbio.funcs->hdp_flush(adev, ring);
}
static void soc15_invalidate_hdp(struct amdgpu_device *adev,
@@ -1157,7 +1202,8 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_MC_MGCG |
- AMD_CG_SUPPORT_MC_LS;
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_IH_CG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x32;
break;
@@ -1208,11 +1254,15 @@ static int soc15_common_early_init(void *handle)
static int soc15_common_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r = 0;
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_get_irq(adev);
- return 0;
+ if (adev->nbio.funcs->ras_late_init)
+ r = adev->nbio.funcs->ras_late_init(adev);
+
+ return r;
}
static int soc15_common_sw_init(void *handle)
@@ -1229,6 +1279,10 @@ static int soc15_common_sw_init(void *handle)
static int soc15_common_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_nbio_ras_fini(adev);
+ adev->df_funcs->sw_fini(adev);
return 0;
}
@@ -1241,12 +1295,12 @@ static void soc15_doorbell_range_init(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- adev->nbio_funcs->sdma_doorbell_range(adev, i,
+ adev->nbio.funcs->sdma_doorbell_range(adev, i,
ring->use_doorbell, ring->doorbell_index,
adev->doorbell_index.sdma_doorbell_range);
}
- adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
adev->irq.ih.doorbell_index);
}
}
@@ -1260,13 +1314,13 @@ static int soc15_common_hw_init(void *handle)
/* enable aspm */
soc15_program_aspm(adev);
/* setup nbio registers */
- adev->nbio_funcs->init_registers(adev);
+ adev->nbio.funcs->init_registers(adev);
/* remap HDP registers to a hole in mmio space,
* for the purpose of exposing those registers
* to process space
*/
- if (adev->nbio_funcs->remap_hdp_registers)
- adev->nbio_funcs->remap_hdp_registers(adev);
+ if (adev->nbio.funcs->remap_hdp_registers)
+ adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, true);
@@ -1289,6 +1343,14 @@ static int soc15_common_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_put_irq(adev);
+ if (adev->nbio.ras_if &&
+ amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
+ if (adev->nbio.funcs->init_ras_controller_interrupt)
+ amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
+ if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
+ amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
+ }
+
return 0;
}
@@ -1429,9 +1491,9 @@ static int soc15_common_set_clockgating_state(void *handle,
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
- adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+ adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+ adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
soc15_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
@@ -1446,9 +1508,9 @@ static int soc15_common_set_clockgating_state(void *handle,
break;
case CHIP_RAVEN:
case CHIP_RENOIR:
- adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+ adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+ adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
soc15_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
@@ -1477,7 +1539,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
if (amdgpu_sriov_vf(adev))
*flags = 0;
- adev->nbio_funcs->get_clockgating_state(adev, flags);
+ adev->nbio.funcs->get_clockgating_state(adev, flags);
/* AMD_CG_SUPPORT_HDP_LS */
data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
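All of the adev->nbio_funcs call sites above move into an adev->nbio container. Its definition is outside this diff; reconstructed from the usage in this patch, it bundles at least the callbacks, the HDP flush registers, and the NBIO RAS state (the exact field list is an assumption):

	struct amdgpu_nbio {
		const struct nbio_hdp_flush_reg *hdp_flush_reg;
		struct amdgpu_irq_src ras_controller_irq;
		struct amdgpu_irq_src ras_err_event_athub_irq;
		struct ras_common_if *ras_if;
		const struct amdgpu_nbio_funcs *funcs;
	};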
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index a3dde0c31f57..9af6c6ffbfa2 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -67,6 +67,8 @@ struct soc15_allowed_register_entry {
#define SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) \
{ ip##_HWIP, inst, reg##_BASE_IDX, reg, and_mask, or_mask }
+#define SOC15_REG_FIELD(reg, field) reg##__##field##_MASK, reg##__##field##__SHIFT
+
void soc15_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int soc15_set_ip_blocks(struct amdgpu_device *adev);
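The new SOC15_REG_FIELD macro pastes a field's generated _MASK/__SHIFT pair into an argument list, so one macro argument can feed helpers that take separate mask and shift parameters. A minimal sketch of such a consumer (the register and field names are illustrative):

	#include <stdint.h>

	static inline uint32_t reg_field_get(uint32_t val, uint32_t mask,
					     uint32_t shift)
	{
		return (val & mask) >> shift;
	}

	/* usage, expanding to (..., SDMA0_STATUS_REG__IDLE_MASK,
	 *                           SDMA0_STATUS_REG__IDLE__SHIFT):
	 *
	 *	idle = reg_field_get(status, SOC15_REG_FIELD(SDMA0_STATUS_REG, IDLE));
	 */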
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c
new file mode 100644
index 000000000000..0d6b50528d76
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "umc_v6_0.h"
+#include "amdgpu.h"
+
+static void umc_v6_0_init_registers(struct amdgpu_device *adev)
+{
+ unsigned i, j;
+
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 4; j++)
+ WREG32((i*0x100000 + 0x5010c + j*0x2000)/4, 0x1002);
+}
+
+const struct amdgpu_umc_funcs umc_v6_0_funcs = {
+ .init_registers = umc_v6_0_init_registers,
+};
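The init loop writes one register per UMC channel; the division by four converts byte addresses into the dword indexing that WREG32 uses here. The offsets it produces can be tabulated with a small host-side sketch:

	#include <stdio.h>

	int main(void)
	{
		/* mirrors the loop above: 4 instances x 4 channels */
		for (unsigned i = 0; i < 4; i++)
			for (unsigned j = 0; j < 4; j++)
				printf("umc %u ch %u -> dword reg 0x%x\n", i, j,
				       (i * 0x100000 + 0x5010c + j * 0x2000) / 4);
		/* e.g. umc 0 ch 0 -> 0x14043, umc 0 ch 1 -> 0x14843 */
		return 0;
	}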
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h
new file mode 100644
index 000000000000..109f1a57a46e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __UMC_V6_0_H__
+#define __UMC_V6_0_H__
+
+#include "soc15_common.h"
+#include "amdgpu.h"
+
+extern const struct amdgpu_umc_funcs umc_v6_0_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 8502e736f721..47c4b96b14d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -75,6 +75,17 @@ static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
RSMU_UMC_INDEX_MODE_EN, 0);
}
+static uint32_t umc_v6_1_get_umc_inst(struct amdgpu_device *adev)
+{
+ uint32_t rsmu_umc_index;
+
+ rsmu_umc_index = RREG32_SOC15(RSMU, 0,
+ mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+ return REG_GET_FIELD(rsmu_umc_index,
+ RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+ RSMU_UMC_INDEX_INSTANCE);
+}
+
static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
uint32_t umc_reg_offset,
unsigned long *error_count)
@@ -165,7 +176,8 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
uint32_t umc_reg_offset, uint32_t channel_index)
{
uint32_t lsb, mc_umc_status_addr;
- uint64_t mc_umc_status, err_addr;
+ uint64_t mc_umc_status, err_addr, retired_page;
+ struct eeprom_table_record *err_rec;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
@@ -177,6 +189,7 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
return;
}
+ err_rec = &err_data->err_addr[err_data->err_addr_cnt];
mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset);
/* calculate error address if ue/ce error is detected */
@@ -191,12 +204,24 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
err_addr &= ~((0x1ULL << lsb) - 1);
/* translate umc channel address to soc pa, 3 parts are included */
- err_data->err_addr[err_data->err_addr_cnt] =
- ADDR_OF_8KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- err_data->err_addr_cnt++;
+ retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ /* only save UE error information for now; CE is skipped */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
+ == 1) {
+ err_rec->address = err_addr;
+ /* page frame address is saved */
+ err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+ err_rec->ts = (uint64_t)ktime_get_real_seconds();
+ err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+ err_rec->cu = 0;
+ err_rec->mem_channel = channel_index;
+ err_rec->mcumc_id = umc_v6_1_get_umc_inst(adev);
+
+ err_data->err_addr_cnt++;
+ }
}
/* clear umc status */
@@ -209,7 +234,7 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
amdgpu_umc_for_each_channel(umc_v6_1_query_error_address);
}
-static void umc_v6_1_ras_init_per_channel(struct amdgpu_device *adev,
+static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev,
struct ras_err_data *err_data,
uint32_t umc_reg_offset, uint32_t channel_index)
{
@@ -239,15 +264,16 @@ static void umc_v6_1_ras_init_per_channel(struct amdgpu_device *adev,
WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
}
-static void umc_v6_1_ras_init(struct amdgpu_device *adev)
+static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
{
void *ras_error_status = NULL;
- amdgpu_umc_for_each_channel(umc_v6_1_ras_init_per_channel);
+ amdgpu_umc_for_each_channel(umc_v6_1_err_cnt_init_per_channel);
}
const struct amdgpu_umc_funcs umc_v6_1_funcs = {
- .ras_init = umc_v6_1_ras_init,
+ .err_cnt_init = umc_v6_1_err_cnt_init,
+ .ras_late_init = amdgpu_umc_ras_late_init,
.query_ras_error_count = umc_v6_1_query_ras_error_count,
.query_ras_error_address = umc_v6_1_query_ras_error_address,
.enable_umc_index_mode = umc_v6_1_enable_umc_index_mode,
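The query path now fills eeprom_table_record entries so uncorrectable errors can be persisted for page retirement. The structure is defined in the RAS EEPROM header rather than in this diff; the field set inferred from the assignments above is roughly (a sketch; exact types and ordering are an assumption):

	struct eeprom_table_record {
		uint64_t retired_page;	/* page frame number, addr >> page shift */
		uint64_t ts;		/* wall-clock seconds when recorded */
		uint64_t address;	/* raw UMC error address */
		unsigned char err_type;	/* AMDGPU_RAS_EEPROM_ERR_* */
		unsigned char cu;
		unsigned char mem_channel;
		unsigned char mcumc_id;
	};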
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 670784a78512..217084d56ab8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -206,13 +206,14 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
* Open up a stream for HW test
*/
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -220,15 +221,15 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00010000;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -268,13 +269,14 @@ err:
*/
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -282,15 +284,15 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00010000;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -327,13 +329,20 @@ err:
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = NULL;
long r;
- r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
+ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+ if (r)
+ return r;
+
+ r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
- r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
+ r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
if (r)
goto error;
@@ -345,6 +354,8 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
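Previously the create/destroy messages pointed their session buffer at ib->gpu_addr + 1024, i.e. into the IB itself; the test now allocates a dedicated 128 KiB VRAM object. amdgpu_bo_create_reserved() hands back the BO pinned and still reserved, which is why the exit path pairs unreserve with unref. The same rework is applied to uvd_v7_0.c below. In isolation (a sketch, error handling elided):

	struct amdgpu_bo *bo = NULL;
	int r;

	r = amdgpu_bo_create_reserved(adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM, &bo, NULL, NULL);
	if (!r) {
		uint64_t addr = amdgpu_bo_gpu_offset(bo);	/* valid: BO is pinned */

		/* ... submit the create and destroy messages against addr ... */

		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
	}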
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 01f658fa72c6..0995378d8263 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -214,13 +214,14 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
* Open up a stream for HW test
*/
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -228,15 +229,15 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00000000;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -275,13 +276,14 @@ err:
* Close up a stream for HW test or if userspace failed to do so
*/
static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence)
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -289,15 +291,15 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001;
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00000000;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002;
@@ -334,13 +336,20 @@ err:
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = NULL;
long r;
- r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
+ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+ if (r)
+ return r;
+
+ r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
- r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
+ r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
if (r)
goto error;
@@ -352,6 +361,8 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 93b3500e522b..b4f84a820a44 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -202,7 +202,6 @@ static int vcn_v1_0_hw_init(void *handle)
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
- ring->sched.ready = true;
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 36ad0c0e8efb..38f787a560cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -244,33 +244,24 @@ static int vcn_v2_0_hw_init(void *handle)
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
int i, r;
- adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
ring->doorbell_index, 0);
- ring->sched.ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
- ring->sched.ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
ring = &adev->vcn.inst->ring_jpeg;
- ring->sched.ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
done:
if (!r)
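The repeated open-coded pattern (mark the ring ready, test it, clear ready on failure) collapses into amdgpu_ring_test_helper(). Mirroring the code removed above, the helper plausibly reduces to the following (a sketch; the in-tree version also logs failures):

	int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
	{
		int r = amdgpu_ring_test_ring(ring);

		ring->sched.ready = !r;
		return r;
	}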
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 395c2259f979..b3623ed3dc42 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_vcn.h"
+#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
@@ -255,32 +256,27 @@ static int vcn_v2_5_hw_init(void *handle)
continue;
ring = &adev->vcn.inst[j].ring_dec;
- adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
ring->doorbell_index, j);
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst[j].ring_enc[i];
+ /* disable encode rings until the FW is robust enough */
ring->sched.ready = false;
continue;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
ring = &adev->vcn.inst[j].ring_jpeg;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
done:
if (!r)
@@ -423,7 +419,6 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
* vcn_v2_5_disable_clock_gating - disable VCN clock gating
*
* @adev: amdgpu_device pointer
- * @sw: enable SW clock gating
*
* Disable clock gating for VCN block
*/
@@ -542,7 +537,6 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
* vcn_v2_5_enable_clock_gating - enable VCN clock gating
*
* @adev: amdgpu_device pointer
- * @sw: enable SW clock gating
*
* Enable clock gating for VCN block
*/
@@ -716,6 +710,9 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
uint32_t rb_bufsz, tmp;
int i, j, k, r;
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -946,6 +943,9 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
}
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, false);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 9eae3536ddad..5cb7e231de5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -226,7 +226,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
/* disable irqs */
vega10_ih_disable_interrupts(adev);
- adev->nbio_funcs->ih_control(adev);
+ adev->nbio.funcs->ih_control(adev);
ih = &adev->irq.ih;
/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
@@ -675,10 +675,49 @@ static int vega10_ih_soft_reset(void *handle)
return 0;
}
+static void vega10_ih_update_clockgating_state(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t data, def, field_val;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
+ def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
+ field_val = enable ? 0 : 1;
+ /*
+ * Vega10 does not have the IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE
+ * and IH_BUFFER_MEM_CLK_SOFT_OVERRIDE fields.
+ */
+ if (adev->asic_type > CHIP_VEGA10) {
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val);
+ }
+
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DYN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ REG_CLK_SOFT_OVERRIDE, field_val);
+ if (def != data)
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data);
+ }
+}
+
static int vega10_ih_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ vega10_ih_update_clockgating_state(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
return 0;
}
static int vega10_ih_set_powergating_state(void *handle,
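Each REG_SET_FIELD() statement above clears one *_CLK_SOFT_OVERRIDE bit when gating is enabled (field_val == 0) and sets it when gating is disabled. Per the soc15 register helpers, the macro reduces to a mask-and-shift update along these lines (sketch, using the generated REG__FIELD_MASK / REG__FIELD__SHIFT constants):

	#include <stdint.h>

	/* generic form of REG_SET_FIELD(val, REG, FIELD, fv) */
	static inline uint32_t set_field(uint32_t val, uint32_t mask,
					 uint32_t shift, uint32_t fv)
	{
		return (val & ~mask) | ((fv << shift) & mask);
	}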
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index bd0580334f83..6b52a539d51b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -24,7 +24,6 @@
#include "soc15.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
int vega10_reg_base_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
index 587e33f5dcce..556f854e3551 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -24,7 +24,6 @@
#include "soc15.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "vega20_ip_offset.h"
int vega20_reg_base_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 5f8c8786cac5..78e5cdc0c058 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -689,16 +689,50 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
return -EINVAL;
}
+int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
+{
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
+ *cap = false;
+ return -ENOENT;
+ }
+
+ return pp_funcs->get_asic_baco_capability(pp_handle, cap);
+}
+
+int smu7_asic_baco_reset(struct amdgpu_device *adev)
+{
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->get_asic_baco_state || !pp_funcs->set_asic_baco_state)
+ return -ENOENT;
+
+ /* enter BACO state */
+ if (pp_funcs->set_asic_baco_state(pp_handle, 1))
+ return -EIO;
+
+ /* exit BACO state */
+ if (pp_funcs->set_asic_baco_state(pp_handle, 0))
+ return -EIO;
+
+ dev_info(adev->dev, "GPU BACO reset\n");
+
+ return 0;
+}
+
/**
- * vi_asic_reset - soft reset GPU
+ * vi_asic_pci_config_reset - soft reset GPU
*
* @adev: amdgpu_device pointer
*
- * Look up which blocks are hung and attempt
- * to reset them.
+ * Use PCI Config method to reset the GPU.
+ *
* Returns 0 for success.
*/
-static int vi_asic_reset(struct amdgpu_device *adev)
+static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
int r;
@@ -714,7 +748,47 @@ static int vi_asic_reset(struct amdgpu_device *adev)
static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
- return AMD_RESET_METHOD_LEGACY;
+ bool baco_reset;
+
+ switch (adev->asic_type) {
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_TOPAZ:
+ smu7_asic_get_baco_capability(adev, &baco_reset);
+ break;
+ default:
+ baco_reset = false;
+ break;
+ }
+
+ if (baco_reset)
+ return AMD_RESET_METHOD_BACO;
+ else
+ return AMD_RESET_METHOD_LEGACY;
+}
+
+/**
+ * vi_asic_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up which blocks are hung and attempt
+ * to reset them.
+ * Returns 0 for success.
+ */
+static int vi_asic_reset(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
+ r = smu7_asic_baco_reset(adev);
+ else
+ r = vi_asic_pci_config_reset(adev);
+
+ return r;
}
static u32 vi_get_config_memsize(struct amdgpu_device *adev)
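Note that vi_asic_reset_method() ignores the return value of smu7_asic_get_baco_capability(); that is safe only because the helper presets *cap to false on every failure path. A more defensive caller would check both (sketch):

	bool baco_ok = false;
	int r;

	if (!smu7_asic_get_baco_capability(adev, &baco_ok) && baco_ok)
		r = smu7_asic_baco_reset(adev);
	else
		r = vi_asic_pci_config_reset(adev);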
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 8de0772f986c..40d4174913a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -31,4 +31,7 @@ void vi_srbm_select(struct amdgpu_device *adev,
int vi_set_ip_blocks(struct amdgpu_device *adev);
void legacy_doorbell_index_init(struct amdgpu_device *adev);
+int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap);
+int smu7_asic_baco_reset(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 177d1e5329a5..9f59ba93cfe0 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -33,7 +33,9 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
- unsigned int vmid, pasid;
+ unsigned int vmid;
+ uint16_t pasid;
+ bool ret;
/* This workaround is due to a HW/FW limitation on Hawaii where
* VMID and PASID are not written into ih_ring_entry
@@ -48,13 +50,13 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
*tmp_ihre = *ihre;
vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd);
- pasid = f2g->get_atc_vmid_pasid_mapping_pasid(dev->kgd, vmid);
+ ret = f2g->get_atc_vmid_pasid_mapping_info(dev->kgd, vmid, &pasid);
tmp_ihre->ring_id &= 0x000000ff;
tmp_ihre->ring_id |= vmid << 8;
tmp_ihre->ring_id |= pasid << 16;
- return (pasid != 0) &&
+ return ret && (pasid != 0) &&
vmid >= dev->vm_info.first_vmid_kfd &&
vmid <= dev->vm_info.last_vmid_kfd;
}
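The old two-call sequence (validity check, then PASID lookup) could race against the mapping changing between the two reads; get_atc_vmid_pasid_mapping_info() returns both answers from a single register read. A plausible shape, with register names assumed from the pre-existing per-ASIC implementations:

	static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
						    uint8_t vmid, uint16_t *p_pasid)
	{
		uint32_t value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);

		*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

		return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
	}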
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 901fe3590165..d3400da6ab64 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -905,7 +905,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x7a5d0000, 0x807c817c,
0x807aff7a, 0x00000080,
0xbf0a717c, 0xbf85fff8,
- 0xbf820141, 0xbef4037e,
+ 0xbf820142, 0xbef4037e,
0x8775ff7f, 0x0000ffff,
0x8875ff75, 0x00040000,
0xbef60380, 0xbef703ff,
@@ -967,7 +967,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x725d0000, 0xe0304080,
0x725d0100, 0xe0304100,
0x725d0200, 0xe0304180,
- 0x725d0300, 0xbf820031,
+ 0x725d0300, 0xbf820032,
0xbef603ff, 0x01000000,
0xbef20378, 0x8078ff78,
0x00000400, 0xbefc0384,
@@ -992,83 +992,84 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x725d0000, 0xe0304100,
0x725d0100, 0xe0304200,
0x725d0200, 0xe0304300,
- 0x725d0300, 0xb9782a05,
- 0x80788178, 0x907c9973,
- 0x877c817c, 0xbf06817c,
- 0xbf850002, 0x8f788978,
- 0xbf820001, 0x8f788a78,
- 0xb9721e06, 0x8f728a72,
- 0x80787278, 0x8078ff78,
- 0x00000200, 0x80f8ff78,
- 0x00000050, 0xbef603ff,
- 0x01000000, 0xbefc03ff,
- 0x0000006c, 0x80f89078,
- 0xf429003a, 0xf0000000,
- 0xbf8cc07f, 0x80fc847c,
- 0xbf800000, 0xbe803100,
- 0xbe823102, 0x80f8a078,
- 0xf42d003a, 0xf0000000,
- 0xbf8cc07f, 0x80fc887c,
- 0xbf800000, 0xbe803100,
- 0xbe823102, 0xbe843104,
- 0xbe863106, 0x80f8c078,
- 0xf431003a, 0xf0000000,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe803100,
- 0xbe823102, 0xbe843104,
- 0xbe863106, 0xbe883108,
- 0xbe8a310a, 0xbe8c310c,
- 0xbe8e310e, 0xbf06807c,
- 0xbf84fff0, 0xb9782a05,
- 0x80788178, 0x907c9973,
- 0x877c817c, 0xbf06817c,
- 0xbf850002, 0x8f788978,
- 0xbf820001, 0x8f788a78,
- 0xb9721e06, 0x8f728a72,
- 0x80787278, 0x8078ff78,
- 0x00000200, 0xbef603ff,
- 0x01000000, 0xf4211bfa,
+ 0x725d0300, 0xbf8c3f70,
+ 0xb9782a05, 0x80788178,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0x8f788978, 0xbf820001,
+ 0x8f788a78, 0xb9721e06,
+ 0x8f728a72, 0x80787278,
+ 0x8078ff78, 0x00000200,
+ 0x80f8ff78, 0x00000050,
+ 0xbef603ff, 0x01000000,
+ 0xbefc03ff, 0x0000006c,
+ 0x80f89078, 0xf429003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc847c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0x80f8a078, 0xf42d003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc887c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0xbe843104, 0xbe863106,
+ 0x80f8c078, 0xf431003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0xbe843104, 0xbe863106,
+ 0xbe883108, 0xbe8a310a,
+ 0xbe8c310c, 0xbe8e310e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb9782a05, 0x80788178,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0x8f788978, 0xbf820001,
+ 0x8f788a78, 0xb9721e06,
+ 0x8f728a72, 0x80787278,
+ 0x8078ff78, 0x00000200,
+ 0xbef603ff, 0x01000000,
+ 0xf4211bfa, 0xf0000000,
+ 0x80788478, 0xf4211b3a,
0xf0000000, 0x80788478,
- 0xf4211b3a, 0xf0000000,
- 0x80788478, 0xf4211b7a,
+ 0xf4211b7a, 0xf0000000,
+ 0x80788478, 0xf4211eba,
0xf0000000, 0x80788478,
- 0xf4211eba, 0xf0000000,
- 0x80788478, 0xf4211efa,
+ 0xf4211efa, 0xf0000000,
+ 0x80788478, 0xf4211c3a,
0xf0000000, 0x80788478,
- 0xf4211c3a, 0xf0000000,
- 0x80788478, 0xf4211c7a,
+ 0xf4211c7a, 0xf0000000,
+ 0x80788478, 0xf4211e7a,
0xf0000000, 0x80788478,
- 0xf4211e7a, 0xf0000000,
- 0x80788478, 0xf4211cfa,
+ 0xf4211cfa, 0xf0000000,
+ 0x80788478, 0xf4211bba,
0xf0000000, 0x80788478,
+ 0xbf8cc07f, 0xb9eef814,
0xf4211bba, 0xf0000000,
0x80788478, 0xbf8cc07f,
- 0xb9eef814, 0xf4211bba,
- 0xf0000000, 0x80788478,
- 0xbf8cc07f, 0xb9eef815,
- 0xbef2036d, 0x876dff72,
- 0x0000ffff, 0xbefc036f,
- 0xbefe037a, 0xbeff037b,
- 0x876f71ff, 0x000003ff,
- 0xb9ef4803, 0xb9f9f816,
- 0x876f71ff, 0xfffff800,
- 0x906f8b6f, 0xb9efa2c3,
- 0xb9f3f801, 0x876fff72,
- 0xfc000000, 0x906f9a6f,
- 0x8f6f906f, 0xbef30380,
+ 0xb9eef815, 0xbef2036d,
+ 0x876dff72, 0x0000ffff,
+ 0xbefc036f, 0xbefe037a,
+ 0xbeff037b, 0x876f71ff,
+ 0x000003ff, 0xb9ef4803,
+ 0xb9f9f816, 0x876f71ff,
+ 0xfffff800, 0x906f8b6f,
+ 0xb9efa2c3, 0xb9f3f801,
+ 0x876fff72, 0xfc000000,
+ 0x906f9a6f, 0x8f6f906f,
+ 0xbef30380, 0x88736f73,
+ 0x876fff72, 0x02000000,
+ 0x906f996f, 0x8f6f8f6f,
0x88736f73, 0x876fff72,
- 0x02000000, 0x906f996f,
- 0x8f6f8f6f, 0x88736f73,
- 0x876fff72, 0x01000000,
- 0x906f986f, 0x8f6f996f,
- 0x88736f73, 0x876fff70,
- 0x00800000, 0x906f976f,
- 0xb9f3f807, 0x87fe7e7e,
- 0x87ea6a6a, 0xb9f0f802,
- 0xbf8a0000, 0xbe80226c,
- 0xbf810000, 0xbf9f0000,
+ 0x01000000, 0x906f986f,
+ 0x8f6f996f, 0x88736f73,
+ 0x876fff70, 0x00800000,
+ 0x906f976f, 0xb9f3f807,
+ 0x87fe7e7e, 0x87ea6a6a,
+ 0xb9f0f802, 0xbf8a0000,
+ 0xbe80226c, 0xbf810000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
+ 0xbf9f0000, 0x00000000,
};
static const uint32_t cwsr_trap_arcturus_hex[] = {
0xbf820001, 0xbf8202c4,
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index cdaa523ce6be..4433bda2ce25 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -758,6 +758,7 @@ L_RESTORE_V0:
buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256
buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2
buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3
+ s_waitcnt vmcnt(0)
/* restore SGPRs */
//will be 2+8+16*6
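The cwsr_trap_handler.h churn above is the assembled form of this one-line fix: the new s_waitcnt guarantees the outstanding v0..v3 restore loads have completed before the handler moves on to the SGPR restore. In the hex arrays it shows up as one inserted dword plus +1 adjustments to the two s_branch immediates that jump over the grown block (encodings below assume the usual SOPP opcode layout):

	static const uint32_t s_waitcnt_vmcnt0 = 0xbf8c3f70;	/* s_waitcnt vmcnt(0) */
	static const uint32_t s_branch_before  = 0xbf820141;	/* s_branch +0x141 */
	static const uint32_t s_branch_after   = 0xbf820142;	/* s_branch +0x142 */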
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 1d3cd5c50d5f..9af45d07515b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -282,7 +282,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
goto err_bind_process;
}
- pr_debug("Creating queue for PASID %d on gpu 0x%x\n",
+ pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
p->pasid,
dev->id);
@@ -332,7 +332,7 @@ static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
int retval;
struct kfd_ioctl_destroy_queue_args *args = data;
- pr_debug("Destroying queue id %d for pasid %d\n",
+ pr_debug("Destroying queue id %d for pasid 0x%x\n",
args->queue_id,
p->pasid);
@@ -378,7 +378,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
properties.queue_percent = args->queue_percentage;
properties.priority = args->queue_priority;
- pr_debug("Updating queue id %d for pasid %d\n",
+ pr_debug("Updating queue id %d for pasid 0x%x\n",
args->queue_id, p->pasid);
mutex_lock(&p->mutex);
@@ -855,7 +855,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
struct kfd_process_device_apertures *pAperture;
struct kfd_process_device *pdd;
- dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
+ dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
args->num_of_nodes = 0;
@@ -913,7 +913,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
uint32_t nodes = 0;
int ret;
- dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
+ dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
if (args->num_of_nodes == 0) {
/* Return number of nodes, so that user space can allocate
@@ -1128,7 +1128,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
mutex_unlock(&p->mutex);
if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
- pdd->qpd.vmid != 0)
+ pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
dev->kfd2kgd->set_scratch_backing_va(
dev->kgd, args->va_addr, pdd->qpd.vmid);
@@ -1801,7 +1801,7 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
} else
goto err_i1;
- dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);
+ dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
process = kfd_get_process(current);
if (IS_ERR(process)) {
@@ -1856,7 +1856,8 @@ err_i1:
kfree(kdata);
if (retcode)
- dev_dbg(kfd_device, "ret = %d\n", retcode);
+ dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n",
+ nr, arg, retcode);
return retcode;
}
@@ -1877,7 +1878,7 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- pr_debug("Process %d mapping mmio page\n"
+ pr_debug("pasid 0x%x mapping mmio page\n"
" target user address == 0x%08llX\n"
" physical address == 0x%08llX\n"
" vm_flags == 0x%04lX\n"
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 66387caf966e..0c327e0fc0f7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -138,6 +138,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
/* TODO - check & update Vega10 cache details */
#define vega10_cache_info carrizo_cache_info
#define raven_cache_info carrizo_cache_info
+#define renoir_cache_info carrizo_cache_info
/* TODO - check & update Navi10 cache details */
#define navi10_cache_info carrizo_cache_info
@@ -670,7 +671,13 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
pcache_info = raven_cache_info;
num_of_cache_types = ARRAY_SIZE(raven_cache_info);
break;
+ case CHIP_RENOIR:
+ pcache_info = renoir_cache_info;
+ num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
+ break;
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
pcache_info = navi10_cache_info;
num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index a3441b0e385b..d59f2cd056c6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -761,6 +761,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
{
int status = 0;
unsigned int vmid;
+ uint16_t queried_pasid;
union SQ_CMD_BITS reg_sq_cmd;
union GRBM_GFX_INDEX_BITS reg_gfx_index;
struct kfd_process_device *pdd;
@@ -782,19 +783,18 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
*/
for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
- if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_valid
- (dev->kgd, vmid)) {
- if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid
- (dev->kgd, vmid) == p->pasid) {
- pr_debug("Killing wave fronts of vmid %d and pasid %d\n",
- vmid, p->pasid);
- break;
- }
+ status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
+ (dev->kgd, vmid, &queried_pasid);
+
+ if (status && queried_pasid == p->pasid) {
+ pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
+ vmid, p->pasid);
+ break;
}
}
if (vmid > last_vmid_to_scan) {
- pr_err("Didn't find vmid for pasid %d\n", p->pasid);
+ pr_err("Didn't find vmid for pasid 0x%x\n", p->pasid);
return -EFAULT;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
index 9d4af961c5d1..9bfa50633654 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
@@ -96,7 +96,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{
if (pmgr->pasid != 0) {
- pr_debug("H/W debugger is already active using pasid %d\n",
+ pr_debug("H/W debugger is already active using pasid 0x%x\n",
pmgr->pasid);
return -EBUSY;
}
@@ -117,7 +117,7 @@ long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{
/* Is the request coming from the already registered process? */
if (pmgr->pasid != p->pasid) {
- pr_debug("H/W debugger is not registered by calling pasid %d\n",
+ pr_debug("H/W debugger is not registered by calling pasid 0x%x\n",
p->pasid);
return -EINVAL;
}
@@ -134,7 +134,7 @@ long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
{
/* Is the request coming from the already registered process? */
if (pmgr->pasid != wac_info->process->pasid) {
- pr_debug("H/W debugger support was not registered for requester pasid %d\n",
+ pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n",
wac_info->process->pasid);
return -EINVAL;
}
@@ -147,7 +147,7 @@ long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr,
{
/* Is the request coming from the already registered process? */
if (pmgr->pasid != adw_info->process->pasid) {
- pr_debug("H/W debugger support was not registered for requester pasid %d\n",
+ pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n",
adw_info->process->pasid);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 0dc1084b5e82..8f4b24e84964 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -39,6 +39,41 @@
*/
static atomic_t kfd_locked = ATOMIC_INIT(0);
+#ifdef CONFIG_DRM_AMDGPU_CIK
+extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
+#endif
+extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
+extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
+extern const struct kfd2kgd_calls arcturus_kfd2kgd;
+extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
+
+static const struct kfd2kgd_calls *kfd2kgd_funcs[] = {
+#ifdef KFD_SUPPORT_IOMMU_V2
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ [CHIP_KAVERI] = &gfx_v7_kfd2kgd,
+#endif
+ [CHIP_CARRIZO] = &gfx_v8_kfd2kgd,
+ [CHIP_RAVEN] = &gfx_v9_kfd2kgd,
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ [CHIP_HAWAII] = &gfx_v7_kfd2kgd,
+#endif
+ [CHIP_TONGA] = &gfx_v8_kfd2kgd,
+ [CHIP_FIJI] = &gfx_v8_kfd2kgd,
+ [CHIP_POLARIS10] = &gfx_v8_kfd2kgd,
+ [CHIP_POLARIS11] = &gfx_v8_kfd2kgd,
+ [CHIP_POLARIS12] = &gfx_v8_kfd2kgd,
+ [CHIP_VEGAM] = &gfx_v8_kfd2kgd,
+ [CHIP_VEGA10] = &gfx_v9_kfd2kgd,
+ [CHIP_VEGA12] = &gfx_v9_kfd2kgd,
+ [CHIP_VEGA20] = &gfx_v9_kfd2kgd,
+ [CHIP_RENOIR] = &gfx_v9_kfd2kgd,
+ [CHIP_ARCTURUS] = &arcturus_kfd2kgd,
+ [CHIP_NAVI10] = &gfx_v10_kfd2kgd,
+ [CHIP_NAVI12] = &gfx_v10_kfd2kgd,
+ [CHIP_NAVI14] = &gfx_v10_kfd2kgd,
+};
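With per-ASIC tables indexed by asic_family, device probing no longer needs the long PCI-device-ID list that a later hunk deletes; a lookup reduces to an array index plus a NULL check. A hypothetical sketch (names mirror the table above; a NULL slot means the ASIC is not supported by KFD in this configuration):

	static const struct kfd2kgd_calls *kfd_lookup_f2g(enum amd_asic_type asic)
	{
		if (asic >= ARRAY_SIZE(kfd2kgd_funcs))
			return NULL;

		return kfd2kgd_funcs[asic];	/* NULL slot == unsupported ASIC */
	}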
+
#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
.asic_family = CHIP_KAVERI,
@@ -351,6 +386,24 @@ static const struct kfd_device_info arcturus_device_info = {
.num_sdma_queues_per_engine = 8,
};
+static const struct kfd_device_info renoir_device_info = {
+ .asic_family = CHIP_RENOIR,
+ .asic_name = "renoir",
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 1,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 2,
+};
+
static const struct kfd_device_info navi10_device_info = {
.asic_family = CHIP_NAVI10,
.asic_name = "navi10",
@@ -369,133 +422,64 @@ static const struct kfd_device_info navi10_device_info = {
.num_sdma_queues_per_engine = 8,
};
-struct kfd_deviceid {
- unsigned short did;
- const struct kfd_device_info *device_info;
+static const struct kfd_device_info navi12_device_info = {
+ .asic_family = CHIP_NAVI12,
+ .asic_name = "navi12",
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
};
-static const struct kfd_deviceid supported_devices[] = {
+static const struct kfd_device_info navi14_device_info = {
+ .asic_family = CHIP_NAVI14,
+ .asic_name = "navi14",
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
+};
+
+/* For each entry, [0] is regular and [1] is virtualisation device. */
+static const struct kfd_device_info *kfd_supported_devices[][2] = {
#ifdef KFD_SUPPORT_IOMMU_V2
- { 0x1304, &kaveri_device_info }, /* Kaveri */
- { 0x1305, &kaveri_device_info }, /* Kaveri */
- { 0x1306, &kaveri_device_info }, /* Kaveri */
- { 0x1307, &kaveri_device_info }, /* Kaveri */
- { 0x1309, &kaveri_device_info }, /* Kaveri */
- { 0x130A, &kaveri_device_info }, /* Kaveri */
- { 0x130B, &kaveri_device_info }, /* Kaveri */
- { 0x130C, &kaveri_device_info }, /* Kaveri */
- { 0x130D, &kaveri_device_info }, /* Kaveri */
- { 0x130E, &kaveri_device_info }, /* Kaveri */
- { 0x130F, &kaveri_device_info }, /* Kaveri */
- { 0x1310, &kaveri_device_info }, /* Kaveri */
- { 0x1311, &kaveri_device_info }, /* Kaveri */
- { 0x1312, &kaveri_device_info }, /* Kaveri */
- { 0x1313, &kaveri_device_info }, /* Kaveri */
- { 0x1315, &kaveri_device_info }, /* Kaveri */
- { 0x1316, &kaveri_device_info }, /* Kaveri */
- { 0x1317, &kaveri_device_info }, /* Kaveri */
- { 0x1318, &kaveri_device_info }, /* Kaveri */
- { 0x131B, &kaveri_device_info }, /* Kaveri */
- { 0x131C, &kaveri_device_info }, /* Kaveri */
- { 0x131D, &kaveri_device_info }, /* Kaveri */
- { 0x9870, &carrizo_device_info }, /* Carrizo */
- { 0x9874, &carrizo_device_info }, /* Carrizo */
- { 0x9875, &carrizo_device_info }, /* Carrizo */
- { 0x9876, &carrizo_device_info }, /* Carrizo */
- { 0x9877, &carrizo_device_info }, /* Carrizo */
- { 0x15DD, &raven_device_info }, /* Raven */
- { 0x15D8, &raven_device_info }, /* Raven */
+ [CHIP_KAVERI] = {&kaveri_device_info, NULL},
+ [CHIP_CARRIZO] = {&carrizo_device_info, NULL},
+ [CHIP_RAVEN] = {&raven_device_info, NULL},
#endif
- { 0x67A0, &hawaii_device_info }, /* Hawaii */
- { 0x67A1, &hawaii_device_info }, /* Hawaii */
- { 0x67A2, &hawaii_device_info }, /* Hawaii */
- { 0x67A8, &hawaii_device_info }, /* Hawaii */
- { 0x67A9, &hawaii_device_info }, /* Hawaii */
- { 0x67AA, &hawaii_device_info }, /* Hawaii */
- { 0x67B0, &hawaii_device_info }, /* Hawaii */
- { 0x67B1, &hawaii_device_info }, /* Hawaii */
- { 0x67B8, &hawaii_device_info }, /* Hawaii */
- { 0x67B9, &hawaii_device_info }, /* Hawaii */
- { 0x67BA, &hawaii_device_info }, /* Hawaii */
- { 0x67BE, &hawaii_device_info }, /* Hawaii */
- { 0x6920, &tonga_device_info }, /* Tonga */
- { 0x6921, &tonga_device_info }, /* Tonga */
- { 0x6928, &tonga_device_info }, /* Tonga */
- { 0x6929, &tonga_device_info }, /* Tonga */
- { 0x692B, &tonga_device_info }, /* Tonga */
- { 0x6938, &tonga_device_info }, /* Tonga */
- { 0x6939, &tonga_device_info }, /* Tonga */
- { 0x7300, &fiji_device_info }, /* Fiji */
- { 0x730F, &fiji_vf_device_info }, /* Fiji vf*/
- { 0x67C0, &polaris10_device_info }, /* Polaris10 */
- { 0x67C1, &polaris10_device_info }, /* Polaris10 */
- { 0x67C2, &polaris10_device_info }, /* Polaris10 */
- { 0x67C4, &polaris10_device_info }, /* Polaris10 */
- { 0x67C7, &polaris10_device_info }, /* Polaris10 */
- { 0x67C8, &polaris10_device_info }, /* Polaris10 */
- { 0x67C9, &polaris10_device_info }, /* Polaris10 */
- { 0x67CA, &polaris10_device_info }, /* Polaris10 */
- { 0x67CC, &polaris10_device_info }, /* Polaris10 */
- { 0x67CF, &polaris10_device_info }, /* Polaris10 */
- { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/
- { 0x67DF, &polaris10_device_info }, /* Polaris10 */
- { 0x6FDF, &polaris10_device_info }, /* Polaris10 */
- { 0x67E0, &polaris11_device_info }, /* Polaris11 */
- { 0x67E1, &polaris11_device_info }, /* Polaris11 */
- { 0x67E3, &polaris11_device_info }, /* Polaris11 */
- { 0x67E7, &polaris11_device_info }, /* Polaris11 */
- { 0x67E8, &polaris11_device_info }, /* Polaris11 */
- { 0x67E9, &polaris11_device_info }, /* Polaris11 */
- { 0x67EB, &polaris11_device_info }, /* Polaris11 */
- { 0x67EF, &polaris11_device_info }, /* Polaris11 */
- { 0x67FF, &polaris11_device_info }, /* Polaris11 */
- { 0x6980, &polaris12_device_info }, /* Polaris12 */
- { 0x6981, &polaris12_device_info }, /* Polaris12 */
- { 0x6985, &polaris12_device_info }, /* Polaris12 */
- { 0x6986, &polaris12_device_info }, /* Polaris12 */
- { 0x6987, &polaris12_device_info }, /* Polaris12 */
- { 0x6995, &polaris12_device_info }, /* Polaris12 */
- { 0x6997, &polaris12_device_info }, /* Polaris12 */
- { 0x699F, &polaris12_device_info }, /* Polaris12 */
- { 0x694C, &vegam_device_info }, /* VegaM */
- { 0x694E, &vegam_device_info }, /* VegaM */
- { 0x694F, &vegam_device_info }, /* VegaM */
- { 0x6860, &vega10_device_info }, /* Vega10 */
- { 0x6861, &vega10_device_info }, /* Vega10 */
- { 0x6862, &vega10_device_info }, /* Vega10 */
- { 0x6863, &vega10_device_info }, /* Vega10 */
- { 0x6864, &vega10_device_info }, /* Vega10 */
- { 0x6867, &vega10_device_info }, /* Vega10 */
- { 0x6868, &vega10_device_info }, /* Vega10 */
- { 0x6869, &vega10_device_info }, /* Vega10 */
- { 0x686A, &vega10_device_info }, /* Vega10 */
- { 0x686B, &vega10_device_info }, /* Vega10 */
- { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/
- { 0x686D, &vega10_device_info }, /* Vega10 */
- { 0x686E, &vega10_device_info }, /* Vega10 */
- { 0x686F, &vega10_device_info }, /* Vega10 */
- { 0x687F, &vega10_device_info }, /* Vega10 */
- { 0x69A0, &vega12_device_info }, /* Vega12 */
- { 0x69A1, &vega12_device_info }, /* Vega12 */
- { 0x69A2, &vega12_device_info }, /* Vega12 */
- { 0x69A3, &vega12_device_info }, /* Vega12 */
- { 0x69AF, &vega12_device_info }, /* Vega12 */
- { 0x66a0, &vega20_device_info }, /* Vega20 */
- { 0x66a1, &vega20_device_info }, /* Vega20 */
- { 0x66a2, &vega20_device_info }, /* Vega20 */
- { 0x66a3, &vega20_device_info }, /* Vega20 */
- { 0x66a4, &vega20_device_info }, /* Vega20 */
- { 0x66a7, &vega20_device_info }, /* Vega20 */
- { 0x66af, &vega20_device_info }, /* Vega20 */
- { 0x738C, &arcturus_device_info }, /* Arcturus */
- { 0x7388, &arcturus_device_info }, /* Arcturus */
- { 0x738E, &arcturus_device_info }, /* Arcturus */
- { 0x7390, &arcturus_device_info }, /* Arcturus vf */
- { 0x7310, &navi10_device_info }, /* Navi10 */
- { 0x7312, &navi10_device_info }, /* Navi10 */
- { 0x7318, &navi10_device_info }, /* Navi10 */
- { 0x731a, &navi10_device_info }, /* Navi10 */
- { 0x731f, &navi10_device_info }, /* Navi10 */
+ [CHIP_HAWAII] = {&hawaii_device_info, NULL},
+ [CHIP_TONGA] = {&tonga_device_info, NULL},
+ [CHIP_FIJI] = {&fiji_device_info, &fiji_vf_device_info},
+ [CHIP_POLARIS10] = {&polaris10_device_info, &polaris10_vf_device_info},
+ [CHIP_POLARIS11] = {&polaris11_device_info, NULL},
+ [CHIP_POLARIS12] = {&polaris12_device_info, NULL},
+ [CHIP_VEGAM] = {&vegam_device_info, NULL},
+ [CHIP_VEGA10] = {&vega10_device_info, &vega10_vf_device_info},
+ [CHIP_VEGA12] = {&vega12_device_info, NULL},
+ [CHIP_VEGA20] = {&vega20_device_info, NULL},
+ [CHIP_RENOIR] = {&renoir_device_info, NULL},
+ [CHIP_ARCTURUS] = {&arcturus_device_info, &arcturus_device_info},
+ [CHIP_NAVI10] = {&navi10_device_info, NULL},
+ [CHIP_NAVI12] = {&navi12_device_info, &navi12_device_info},
+ [CHIP_NAVI14] = {&navi14_device_info, NULL},
};
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
@@ -504,32 +488,25 @@ static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
static int kfd_resume(struct kfd_dev *kfd);
-static const struct kfd_device_info *lookup_device_info(unsigned short did)
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ struct pci_dev *pdev, unsigned int asic_type, bool vf)
{
- size_t i;
+ struct kfd_dev *kfd;
+ const struct kfd_device_info *device_info;
+ const struct kfd2kgd_calls *f2g;
- for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
- if (supported_devices[i].did == did) {
- WARN_ON(!supported_devices[i].device_info);
- return supported_devices[i].device_info;
- }
+ if (asic_type >= sizeof(kfd_supported_devices) / (sizeof(void *) * 2)
+ || asic_type >= sizeof(kfd2kgd_funcs) / sizeof(void *)) {
+ dev_err(kfd_device, "asic_type %d out of range\n", asic_type);
+ return NULL; /* asic_type out of range */
}
- dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
- did);
-
- return NULL;
-}
-
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
- struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
-{
- struct kfd_dev *kfd;
- const struct kfd_device_info *device_info =
- lookup_device_info(pdev->device);
+ device_info = kfd_supported_devices[asic_type][vf];
+ f2g = kfd2kgd_funcs[asic_type];
- if (!device_info) {
- dev_err(kfd_device, "kgd2kfd_probe failed\n");
+ if (!device_info || !f2g) {
+ dev_err(kfd_device, "%s %s not supported in kfd\n",
+ amdgpu_asic_name[asic_type], vf ? "VF" : "");
return NULL;
}
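Taken together, the per-DID table and lookup_device_info() collapse into two array indexes. A minimal sketch of the equivalent lookup, assuming the kernel's ARRAY_SIZE() macro (the code above spells the same bound out with sizeof); kfd_lookup_device_info is an illustrative name, not part of the patch:

	static const struct kfd_device_info *
	kfd_lookup_device_info(unsigned int asic_type, bool vf)
	{
		/* [0] holds the bare-metal entry, [1] the VF entry; either may be NULL */
		if (asic_type >= ARRAY_SIZE(kfd_supported_devices))
			return NULL;	/* unknown ASIC */
		return kfd_supported_devices[asic_type][vf];
	}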
@@ -593,10 +570,12 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
}
bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources)
{
unsigned int size;
+ kfd->ddev = ddev;
kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
KGD_ENGINE_MEC1);
kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index d985e31fcc1e..81fb545cf42c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -195,20 +195,30 @@ static int allocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
- int bit, allocated_vmid;
+ int allocated_vmid = -1, i;
- if (dqm->vmid_bitmap == 0)
- return -ENOMEM;
+ for (i = dqm->dev->vm_info.first_vmid_kfd;
+ i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
+ if (!dqm->vmid_pasid[i]) {
+ allocated_vmid = i;
+ break;
+ }
+ }
+
+ if (allocated_vmid < 0) {
+ pr_err("no more vmid to allocate\n");
+ return -ENOSPC;
+ }
- bit = ffs(dqm->vmid_bitmap) - 1;
- dqm->vmid_bitmap &= ~(1 << bit);
+ pr_debug("vmid allocated: %d\n", allocated_vmid);
+
+ dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
+
+ set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
- allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
- pr_debug("vmid allocation %d\n", allocated_vmid);
qpd->vmid = allocated_vmid;
q->properties.vmid = allocated_vmid;
- set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
program_sh_mem_settings(dqm, qpd);
/* qpd->page_table_base is set earlier when register_process()
@@ -220,8 +230,9 @@ static int allocate_vmid(struct device_queue_manager *dqm,
/* invalidate the VM context after pasid and vmid mapping is set up */
kfd_flush_tlb(qpd_to_pdd(qpd));
- dqm->dev->kfd2kgd->set_scratch_backing_va(
- dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);
+ if (dqm->dev->kfd2kgd->set_scratch_backing_va)
+ dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd,
+ qpd->sh_hidden_private_base, qpd->vmid);
return 0;
}
@@ -248,8 +259,6 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
- int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;
-
/* On GFX v7, CP doesn't flush TC at dequeue */
if (q->device->device_info->asic_family == CHIP_HAWAII)
if (flush_texture_cache_nocpsch(q->device, qpd))
@@ -259,8 +268,8 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
/* Release the vmid mapping */
set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
+ dqm->vmid_pasid[qpd->vmid] = 0;
- dqm->vmid_bitmap |= (1 << bit);
qpd->vmid = 0;
q->properties.vmid = 0;
}
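The bitmap bookkeeping above is replaced by a direct VMID-to-PASID map; a condensed sketch of the new pairing (vmid_get/vmid_put are illustrative names, the fields are from this patch). The payoff shows up in kfd_int_process_v9.c below, where the ISR reads the PASID with a plain array index instead of a register round-trip:

	/* A VMID is free iff its slot holds PASID 0. */
	static int vmid_get(struct device_queue_manager *dqm, uint16_t pasid)
	{
		int i;

		for (i = dqm->dev->vm_info.first_vmid_kfd;
		     i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
			if (!dqm->vmid_pasid[i]) {
				dqm->vmid_pasid[i] = pasid;
				return i;		/* allocated */
			}
		}
		return -ENOSPC;				/* all VMIDs busy */
	}

	static void vmid_put(struct device_queue_manager *dqm, int vmid)
	{
		dqm->vmid_pasid[vmid] = 0;		/* mark slot free */
	}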
@@ -579,7 +588,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
goto out;
pdd = qpd_to_pdd(qpd);
- pr_info_ratelimited("Evicting PASID %u queues\n",
+ pr_info_ratelimited("Evicting PASID 0x%x queues\n",
pdd->process->pasid);
/* Mark all queues as evicted. Deactivate all active queues on
@@ -621,7 +630,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
goto out;
pdd = qpd_to_pdd(qpd);
- pr_info_ratelimited("Evicting PASID %u queues\n",
+ pr_info_ratelimited("Evicting PASID 0x%x queues\n",
pdd->process->pasid);
/* Mark all queues as evicted. Deactivate all active queues on
@@ -667,7 +676,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
goto out;
}
- pr_info_ratelimited("Restoring PASID %u queues\n",
+ pr_info_ratelimited("Restoring PASID 0x%x queues\n",
pdd->process->pasid);
/* Update PD Base in QPD */
@@ -739,7 +748,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
goto out;
}
- pr_info_ratelimited("Restoring PASID %u queues\n",
+ pr_info_ratelimited("Restoring PASID 0x%x queues\n",
pdd->process->pasid);
/* Update PD Base in QPD */
@@ -879,7 +888,8 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
dqm->allocated_queues[pipe] |= 1 << queue;
}
- dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
+ memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
+
dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
@@ -902,12 +912,18 @@ static void uninitialize(struct device_queue_manager *dqm)
static int start_nocpsch(struct device_queue_manager *dqm)
{
init_interrupts(dqm);
- return pm_init(&dqm->packets, dqm);
+
+ if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
+ return pm_init(&dqm->packets, dqm);
+
+ return 0;
}
static int stop_nocpsch(struct device_queue_manager *dqm)
{
- pm_uninit(&dqm->packets);
+ if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
+ pm_uninit(&dqm->packets);
+
return 0;
}
@@ -1676,7 +1692,8 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
struct kfd_dev *dev = dqm->dev;
struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
- dev->device_info->num_sdma_engines *
+ (dev->device_info->num_sdma_engines +
+ dev->device_info->num_xgmi_sdma_engines) *
dev->device_info->num_sdma_queues_per_engine +
dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
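As a worked example using the device_info at the top of this patch (2 SDMA engines, 0 xGMI SDMA engines, 8 queues per engine), the buffer is now sized mqd_size * (2 + 0) * 8 + hiq_mqd_size, i.e. room for 16 SDMA MQDs plus the HIQ MQD; parts that do have xGMI SDMA engines gain the rows the old formula left out.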
@@ -1786,10 +1803,13 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
case CHIP_ARCTURUS:
device_queue_manager_init_v9(&dqm->asic_ops);
break;
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
device_queue_manager_init_v10_navi10(&dqm->asic_ops);
break;
default:
@@ -1917,7 +1937,8 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
}
}
- for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) {
+ for (pipe = 0; pipe < get_num_sdma_engines(dqm) +
+ get_num_xgmi_sdma_engines(dqm); pipe++) {
for (queue = 0;
queue < dqm->dev->device_info->num_sdma_queues_per_engine;
queue++) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 90db2c9275f6..2eaea6b04cbe 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -32,6 +32,8 @@
#include "kfd_mqd_manager.h"
+#define VMID_NUM 16
+
struct device_process_node {
struct qcm_process_device *qpd;
struct list_head list;
@@ -185,7 +187,8 @@ struct device_queue_manager {
unsigned int *allocated_queues;
uint64_t sdma_bitmap;
uint64_t xgmi_sdma_bitmap;
- unsigned int vmid_bitmap;
+ /* The PASID mapped to each KFD VMID */
+ uint16_t vmid_pasid[VMID_NUM];
uint64_t pipelines_addr;
struct kfd_mem_obj *pipeline_mem;
uint64_t fence_gpu_addr;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index d674d4b3340f..908081c85de1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -852,8 +852,8 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
if (type == KFD_EVENT_TYPE_MEMORY) {
dev_warn(kfd_device,
- "Sending SIGSEGV to HSA Process with PID %d ",
- p->lead_thread->pid);
+ "Sending SIGSEGV to process %d (pasid 0x%x)",
+ p->lead_thread->pid, p->pasid);
send_sig(SIGSEGV, p->lead_thread, 0);
}
@@ -861,13 +861,13 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
if (send_signal) {
if (send_sigterm) {
dev_warn(kfd_device,
- "Sending SIGTERM to HSA Process with PID %d ",
- p->lead_thread->pid);
+ "Sending SIGTERM to process %d (pasid 0x%x)",
+ p->lead_thread->pid, p->pasid);
send_sig(SIGTERM, p->lead_thread, 0);
} else {
dev_err(kfd_device,
- "HSA Process (PID %d) got unhandled exception",
- p->lead_thread->pid);
+ "Process %d (pasid 0x%x) got unhandled exception",
+ p->lead_thread->pid, p->pasid);
}
}
}
@@ -936,7 +936,8 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
/* Workaround on Raven to not kill the process when memory is freed
* before IOMMU is able to finish processing all the excessive PPRs
*/
- if (dev->device_info->asic_family != CHIP_RAVEN) {
+ if (dev->device_info->asic_family != CHIP_RAVEN &&
+ dev->device_info->asic_family != CHIP_RENOIR) {
mutex_lock(&p->event_mutex);
/* Lookup events by type and signal them */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 9dc4bff8085e..bb77b8890e77 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -369,8 +369,13 @@ int kfd_init_apertures(struct kfd_process *process)
/*Iterating over all devices*/
while (kfd_topology_enum_kfd_devices(id, &dev) == 0) {
- if (!dev) {
- id++; /* Skip non GPU devices */
+ if (!dev || kfd_devcgroup_check_permission(dev)) {
+ /* Skip non-GPU devices and devices to which the
+ * current process has no access. Access can be
+ * limited by placing the process in a specific
+ * cgroup hierarchy.
+ */
+ id++;
continue;
}
@@ -405,8 +410,11 @@ int kfd_init_apertures(struct kfd_process *process)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
case CHIP_ARCTURUS:
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
kfd_init_apertures_v9(pdd, id);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 3ef67d2e0d9f..e05d75ecda21 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -54,8 +54,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
memcpy(patched_ihre, ih_ring_entry,
dev->device_info->ih_ring_entry_size);
- pasid = dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid(
- dev->kgd, vmid);
+ pasid = dev->dqm->vmid_pasid[vmid];
/* Patch the pasid field */
patched_ihre[3] = cpu_to_le32((le32_to_cpu(patched_ihre[3])
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index c56ac47cd318..bc47f6a44456 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -62,6 +62,11 @@ int kfd_interrupt_init(struct kfd_dev *kfd)
}
kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
+ if (unlikely(!kfd->ih_wq)) {
+ kfifo_free(&kfd->ih_fifo);
+ dev_err(kfd_chardev(), "Failed to allocate KFD IH workqueue\n");
+ return -ENOMEM;
+ }
spin_lock_init(&kfd->interrupt_lock);
INIT_WORK(&kfd->interrupt_work, interrupt_wq);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 5f35df23fb18..193e2835bd4d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -160,7 +160,7 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
if (!p)
return;
- pr_debug("Unbinding process %d from IOMMU\n", pasid);
+ pr_debug("Unbinding process 0x%x from IOMMU\n", pasid);
mutex_lock(kfd_get_dbgmgr_mutex());
@@ -194,7 +194,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
struct kfd_dev *dev;
dev_warn_ratelimited(kfd_device,
- "Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X",
+ "Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X",
PCI_BUS_NUM(pdev->devfn),
PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn),
@@ -235,7 +235,7 @@ static int kfd_bind_processes_to_device(struct kfd_dev *kfd)
err = amd_iommu_bind_pasid(kfd->pdev, p->pasid,
p->lead_thread);
if (err < 0) {
- pr_err("Unexpected pasid %d binding failure\n",
+ pr_err("Unexpected pasid 0x%x binding failure\n",
p->pasid);
mutex_unlock(&p->mutex);
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 8b4564f71a7a..11d244891393 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -330,10 +330,13 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
case CHIP_ARCTURUS:
kernel_queue_init_v9(&kq->ops_asic_specific);
break;
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
kernel_queue_init_v10(&kq->ops_asic_specific);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 986ff52d5750..f4b7f7e6c40e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -82,7 +82,7 @@ static void kfd_exit(void)
kfd_chardev_exit();
}
-int kgd2kfd_init()
+int kgd2kfd_init(void)
{
return kfd_init();
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 9cd3eb2d90bd..4a236b2c2354 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -69,35 +69,13 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
struct queue_properties *q)
{
- int retval;
- struct kfd_mem_obj *mqd_mem_obj = NULL;
+ struct kfd_mem_obj *mqd_mem_obj;
- /* From V9, for CWSR, the control stack is located on the next page
- * boundary after the mqd, we will use the gtt allocation function
- * instead of sub-allocation function.
- */
- if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
- mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
- if (!mqd_mem_obj)
- return NULL;
- retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
- ALIGN(q->ctl_stack_size, PAGE_SIZE) +
- ALIGN(sizeof(struct v10_compute_mqd), PAGE_SIZE),
- &(mqd_mem_obj->gtt_mem),
- &(mqd_mem_obj->gpu_addr),
- (void *)&(mqd_mem_obj->cpu_ptr), true);
- } else {
- retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd),
- &mqd_mem_obj);
- }
-
- if (retval) {
- kfree(mqd_mem_obj);
+ if (kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd),
+ &mqd_mem_obj))
return NULL;
- }
return mqd_mem_obj;
-
}
static void init_mqd(struct mqd_manager *mm, void **mqd,
@@ -250,14 +228,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
static void free_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
- struct kfd_dev *kfd = mm->dev;
-
- if (mqd_mem_obj->gtt_mem) {
- amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
- kfree(mqd_mem_obj);
- } else {
- kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
- }
+ kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}
static bool is_occupied(struct mqd_manager *mm, void *mqd,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 2c8624c5b42c..83ef4b3dd2fb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -239,10 +239,13 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
case CHIP_ARCTURUS:
pm->pmf = &kfd_v9_pm_funcs;
break;
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
pm->pmf = &kfd_v10_pm_funcs;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index c89326125d71..060a9e8b301e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -36,6 +36,10 @@
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
+#include <linux/device_cgroup.h>
+#include <drm/drm_file.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_device.h>
#include <kgd_kfd_interface.h>
#include "amd_shared.h"
@@ -179,10 +183,6 @@ enum cache_policy {
cache_policy_noncoherent
};
-#define KFD_IS_VI(chip) ((chip) >= CHIP_CARRIZO && (chip) <= CHIP_POLARIS11)
-#define KFD_IS_DGPU(chip) (((chip) >= CHIP_TONGA && \
- (chip) <= CHIP_NAVI10) || \
- (chip) == CHIP_HAWAII)
#define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10)
struct kfd_event_interrupt_class {
@@ -230,6 +230,7 @@ struct kfd_dev {
const struct kfd_device_info *device_info;
struct pci_dev *pdev;
+ struct drm_device *ddev;
unsigned int id; /* topology stub index */
@@ -687,7 +688,7 @@ struct kfd_process {
/* We want to receive a notification when the mm_struct is destroyed */
struct mmu_notifier mmu_notifier;
- unsigned int pasid;
+ uint16_t pasid;
unsigned int doorbell_index;
/*
@@ -1040,6 +1041,21 @@ bool kfd_is_locked(void);
void kfd_inc_compute_active(struct kfd_dev *dev);
void kfd_dec_compute_active(struct kfd_dev *dev);
+/* Cgroup Support */
+/* Check with device cgroup if @kfd device is accessible */
+static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
+{
+#if defined(CONFIG_CGROUP_DEVICE)
+ struct drm_device *ddev = kfd->ddev;
+
+ return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major,
+ ddev->render->index,
+ DEVCG_ACC_WRITE | DEVCG_ACC_READ);
+#else
+ return 0;
+#endif
+}
+
/* Debugfs */
#if defined(CONFIG_DEBUG_FS)
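Every kfd sysfs and aperture path touched by this patch gates access through the new helper in the same two-line shape (taken from the kfd_topology.c hunks below):

	if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
		return -EPERM;	/* device cgroup denies this process access */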
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 40e3fc0c6942..10f9af5784f2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -416,7 +416,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
list_for_each_entry_safe(pdd, temp, &p->per_device_data,
per_device_list) {
- pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
+ pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
pdd->dev->id, p->pasid);
if (pdd->drm_file) {
@@ -687,6 +687,8 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
struct kfd_dev *dev)
{
unsigned int i;
+ int range_start = dev->shared_resources.non_cp_doorbells_start;
+ int range_end = dev->shared_resources.non_cp_doorbells_end;
if (!KFD_IS_SOC15(dev->device_info->asic_family))
return 0;
@@ -698,14 +700,16 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
return -ENOMEM;
/* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
+ pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
+ pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
+ range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
+ range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
+
for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
- if (i >= dev->shared_resources.non_cp_doorbells_start
- && i <= dev->shared_resources.non_cp_doorbells_end) {
+ if (i >= range_start && i <= range_end) {
set_bit(i, qpd->doorbell_bitmap);
set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
qpd->doorbell_bitmap);
- pr_debug("reserved doorbell 0x%03x and 0x%03x\n", i,
- i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
}
}
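As a hypothetical example, if non_cp_doorbells_start/end came in as 0x0C0 and 0x0FF, the loop would reserve 0x0C0-0x0FF plus the same range shifted by KFD_QUEUE_DOORBELL_MIRROR_OFFSET; the two pr_debug lines above now report each range once instead of logging every reserved doorbell individually.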
@@ -1020,7 +1024,7 @@ static void evict_process_worker(struct work_struct *work)
*/
flush_delayed_work(&p->restore_work);
- pr_debug("Started evicting pasid %d\n", p->pasid);
+ pr_debug("Started evicting pasid 0x%x\n", p->pasid);
ret = kfd_process_evict_queues(p);
if (!ret) {
dma_fence_signal(p->ef);
@@ -1029,9 +1033,9 @@ static void evict_process_worker(struct work_struct *work)
queue_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
- pr_debug("Finished evicting pasid %d\n", p->pasid);
+ pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
} else
- pr_err("Failed to evict queues of pasid %d\n", p->pasid);
+ pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}
static void restore_process_worker(struct work_struct *work)
@@ -1046,7 +1050,7 @@ static void restore_process_worker(struct work_struct *work)
* lifetime of this thread, kfd_process p will be valid
*/
p = container_of(dwork, struct kfd_process, restore_work);
- pr_debug("Started restoring pasid %d\n", p->pasid);
+ pr_debug("Started restoring pasid 0x%x\n", p->pasid);
/* Setting last_restore_timestamp before successful restoration.
* Otherwise this would have to be set by KGD (restore_process_bos)
@@ -1062,7 +1066,7 @@ static void restore_process_worker(struct work_struct *work)
ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
&p->ef);
if (ret) {
- pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
+ pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
p->pasid, PROCESS_BACK_OFF_TIME_MS);
ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
@@ -1072,9 +1076,9 @@ static void restore_process_worker(struct work_struct *work)
ret = kfd_process_restore_queues(p);
if (!ret)
- pr_debug("Finished restoring pasid %d\n", p->pasid);
+ pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
else
- pr_err("Failed to restore queues of pasid %d\n", p->pasid);
+ pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}
void kfd_suspend_all_processes(void)
@@ -1088,7 +1092,7 @@ void kfd_suspend_all_processes(void)
cancel_delayed_work_sync(&p->restore_work);
if (kfd_process_evict_queues(p))
- pr_err("Failed to suspend process %d\n", p->pasid);
+ pr_err("Failed to suspend process 0x%x\n", p->pasid);
dma_fence_signal(p->ef);
dma_fence_put(p->ef);
p->ef = NULL;
@@ -1171,7 +1175,7 @@ int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
int idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- seq_printf(m, "Process %d PASID %d:\n",
+ seq_printf(m, "Process %d PASID 0x%x:\n",
p->lead_thread->tgid, p->pasid);
mutex_lock(&p->mutex);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 7e6c3ee82f5b..2659d226c056 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -53,7 +53,7 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
pr_debug("The new slot id %lu\n", found);
if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
- pr_info("Cannot open more queues for process with pasid %d\n",
+ pr_info("Cannot open more queues for process with pasid 0x%x\n",
pqm->process->pasid);
return -ENOMEM;
}
@@ -298,7 +298,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("Pasid %d DQM create queue %d failed. ret %d\n",
+ pr_err("Pasid 0x%x DQM create queue %d failed. ret %d\n",
pqm->process->pasid, type, retval);
goto err_create_queue;
}
@@ -377,7 +377,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
dqm = pqn->q->device->dqm;
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
if (retval) {
- pr_err("Pasid %d destroy queue %d failed, ret %d\n",
+ pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
pqm->process->pasid,
pqn->q->properties.queue_id, retval);
if (retval != -ETIME)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 7551761f2aa9..69bd0628fdc6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -269,6 +269,8 @@ static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr,
buffer[0] = 0;
iolink = container_of(attr, struct kfd_iolink_properties, attr);
+ if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "type", iolink->iolink_type);
sysfs_show_32bit_prop(buffer, "version_major", iolink->ver_maj);
sysfs_show_32bit_prop(buffer, "version_minor", iolink->ver_min);
@@ -305,6 +307,8 @@ static ssize_t mem_show(struct kobject *kobj, struct attribute *attr,
buffer[0] = 0;
mem = container_of(attr, struct kfd_mem_properties, attr);
+ if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "heap_type", mem->heap_type);
sysfs_show_64bit_prop(buffer, "size_in_bytes", mem->size_in_bytes);
sysfs_show_32bit_prop(buffer, "flags", mem->flags);
@@ -334,6 +338,8 @@ static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
buffer[0] = 0;
cache = container_of(attr, struct kfd_cache_properties, attr);
+ if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "processor_id_low",
cache->processor_id_low);
sysfs_show_32bit_prop(buffer, "level", cache->cache_level);
@@ -414,6 +420,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
if (strcmp(attr->name, "gpu_id") == 0) {
dev = container_of(attr, struct kfd_topology_device,
attr_gpuid);
+ if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
+ return -EPERM;
return sysfs_show_32bit_val(buffer, dev->gpu_id);
}
@@ -421,11 +429,15 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev = container_of(attr, struct kfd_topology_device,
attr_name);
+ if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
+ return -EPERM;
return sysfs_show_str_val(buffer, dev->node_props.name);
}
dev = container_of(attr, struct kfd_topology_device,
attr_props);
+ if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "cpu_cores_count",
dev->node_props.cpu_cores_count);
sysfs_show_32bit_prop(buffer, "simd_count",
@@ -1098,6 +1110,9 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
{
struct kfd_topology_device *dev;
struct kfd_topology_device *out_dev = NULL;
+ struct kfd_mem_properties *mem;
+ struct kfd_cache_properties *cache;
+ struct kfd_iolink_properties *iolink;
down_write(&topology_lock);
list_for_each_entry(dev, &topology_device_list, list) {
@@ -1111,6 +1126,13 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
if (!dev->gpu && (dev->node_props.simd_count > 0)) {
dev->gpu = gpu;
out_dev = dev;
+
+ list_for_each_entry(mem, &dev->mem_props, list)
+ mem->gpu = dev->gpu;
+ list_for_each_entry(cache, &dev->cache_props, list)
+ cache->gpu = dev->gpu;
+ list_for_each_entry(iolink, &dev->io_link_props, list)
+ iolink->gpu = dev->gpu;
break;
}
}
@@ -1317,8 +1339,11 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
case CHIP_ARCTURUS:
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index d4718d58d0f2..15843e0fc756 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -102,6 +102,7 @@ struct kfd_mem_properties {
uint32_t flags;
uint32_t width;
uint32_t mem_clk_max;
+ struct kfd_dev *gpu;
struct kobject *kobj;
struct attribute attr;
};
@@ -123,6 +124,7 @@ struct kfd_cache_properties {
uint32_t cache_latency;
uint32_t cache_type;
uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE];
+ struct kfd_dev *gpu;
struct kobject *kobj;
struct attribute attr;
};
@@ -141,6 +143,7 @@ struct kfd_iolink_properties {
uint32_t max_bandwidth;
uint32_t rec_transfer_size;
uint32_t flags;
+ struct kfd_dev *gpu;
struct kobject *kobj;
struct attribute attr;
};
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 71991a28a775..313183b80032 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -23,16 +23,16 @@ config DRM_AMD_DC_DCN2_0
depends on DRM_AMD_DC && X86
depends on DRM_AMD_DC_DCN1_0
help
- Choose this option if you want to have
- Navi support for display engine
+ Choose this option if you want to have
+ Navi support for display engine
config DRM_AMD_DC_DCN2_1
- bool "DCN 2.1 family"
- depends on DRM_AMD_DC && X86
- depends on DRM_AMD_DC_DCN2_0
- help
- Choose this option if you want to have
- Renoir support for display engine
+ bool "DCN 2.1 family"
+ depends on DRM_AMD_DC && X86
+ depends on DRM_AMD_DC_DCN2_0
+ help
+ Choose this option if you want to have
+ Renoir support for display engine
config DRM_AMD_DC_DSC_SUPPORT
bool "DSC support"
@@ -41,8 +41,16 @@ config DRM_AMD_DC_DSC_SUPPORT
depends on DRM_AMD_DC_DCN1_0
depends on DRM_AMD_DC_DCN2_0
help
- Choose this option if you want to have
- Dynamic Stream Compression support
+ Choose this option if you want to have
+ Dynamic Stream Compression support
+
+config DRM_AMD_DC_HDCP
+ bool "Enable HDCP support in DC"
+ depends on DRM_AMD_DC
+ help
+ Choose this option if you want to
+ support HDCP authentication
config DEBUG_KERNEL_DC
bool "Enable kgdb break in DC"
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 496cee000f10..36b3d6a5d04d 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -34,12 +34,19 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power
+ifdef CONFIG_DRM_AMD_DC_HDCP
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/hdcp
+endif
#TODO: remove when Timing Sync feature is complete
subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power
+ifdef CONFIG_DRM_AMD_DC_HDCP
+DAL_LIBS += modules/hdcp
+endif
+
AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
include $(AMD_DAL)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 94911871eb9b..9a3b7bf8ab0b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -31,6 +31,10 @@ ifneq ($(CONFIG_DRM_AMD_DC),)
AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o
endif
+ifdef CONFIG_DRM_AMD_DC_HDCP
+AMDGPUDM += amdgpu_dm_hdcp.o
+endif
+
ifneq ($(CONFIG_DEBUG_FS),)
AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o
endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c67d3c41db19..8c250fd07015 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -37,6 +37,9 @@
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#include "amdgpu_dm_hdcp.h"
+#endif
#include "amdgpu_pm.h"
#include "amd_shared.h"
@@ -67,6 +70,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
+#include <drm/drm_hdcp.h>
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
@@ -263,6 +267,13 @@ static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
+/**
+ * dm_pflip_high_irq() - Handle pageflip interrupt
+ * @interrupt_params: ignored
+ *
+ * Handles the pageflip interrupt by notifying all interested parties
+ * that the pageflip has been completed.
+ */
static void dm_pflip_high_irq(void *interrupt_params)
{
struct amdgpu_crtc *amdgpu_crtc;
@@ -407,6 +418,13 @@ static void dm_vupdate_high_irq(void *interrupt_params)
}
}
+/**
+ * dm_crtc_high_irq() - Handles CRTC interrupt
+ * @interrupt_params: ignored
+ *
+ * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
+ * event handler.
+ */
static void dm_crtc_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
@@ -646,11 +664,18 @@ void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct dc_callback_init init_params;
+#endif
+
adev->dm.ddev = adev->ddev;
adev->dm.adev = adev;
/* Zero all the fields */
memset(&init_data, 0, sizeof(init_data));
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ memset(&init_params, 0, sizeof(init_params));
+#endif
mutex_init(&adev->dm.dc_lock);
mutex_init(&adev->dm.audio_lock);
@@ -713,6 +738,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
+ dc_hardware_init(adev->dm.dc);
+
adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
if (!adev->dm.freesync_module) {
DRM_ERROR(
@@ -723,6 +750,18 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
amdgpu_dm_init_color_mod();
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->asic_type >= CHIP_RAVEN) {
+ adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc);
+
+ if (!adev->dm.hdcp_workqueue)
+ DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
+ else
+ DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
+
+ dc_init_callbacks(adev->dm.dc, &init_params);
+ }
+#endif
if (amdgpu_dm_initialize_drm_device(adev)) {
DRM_ERROR(
"amdgpu: failed to initialize sw for display support.\n");
@@ -764,6 +803,16 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
amdgpu_dm_destroy_drm_device(&adev->dm);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->dm.hdcp_workqueue) {
+ hdcp_destroy(adev->dm.hdcp_workqueue);
+ adev->dm.hdcp_workqueue = NULL;
+ }
+
+ if (adev->dm.dc)
+ dc_deinit_callbacks(adev->dm.dc);
+#endif
+
/* DC Destroy TODO: Replace destroy DAL */
if (adev->dm.dc)
dc_destroy(&adev->dm.dc);
@@ -897,27 +946,29 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
int ret = 0;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type == dc_connection_mst_branch &&
aconnector->mst_mgr.aux) {
DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
- aconnector, aconnector->base.base.id);
+ aconnector,
+ aconnector->base.base.id);
ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
if (ret < 0) {
DRM_ERROR("DM_MST: Failed to start MST\n");
- ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
- return ret;
- }
+ aconnector->dc_link->type =
+ dc_connection_single;
+ break;
}
+ }
}
+ drm_connector_list_iter_end(&iter);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
return ret;
}
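The same conversion, swapping the connection_mutex-protected list walk for the reference-counted connector iterator, recurs in s3_handle_mst() and dm_resume() below. Its skeleton, using the DRM core's drm_connector_list_iter API as this patch does:

	struct drm_connector_list_iter iter;
	struct drm_connector *connector;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		/* per-connector work; connectors may come and go concurrently */
	}
	drm_connector_list_iter_end(&iter);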
@@ -940,6 +991,11 @@ static int dm_late_init(void *handle)
params.backlight_lut_array_size = 16;
params.backlight_lut_array = linear_lut;
+ /* Min backlight level after ABM reduction; don't allow below 1%:
+ * 0xFFFF * 0.01 = 0x28F
+ */
+ params.min_abm_backlight = 0x28F;
+
/* todo will enable for navi10 */
if (adev->asic_type <= CHIP_RAVEN) {
ret = dmcu_load_iram(dmcu, params);
@@ -955,14 +1011,13 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct drm_dp_mst_topology_mgr *mgr;
int ret;
bool need_hotplug = false;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
aconnector->mst_port)
@@ -973,15 +1028,14 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
if (suspend) {
drm_dp_mst_topology_mgr_suspend(mgr);
} else {
- ret = drm_dp_mst_topology_mgr_resume(mgr);
+ ret = drm_dp_mst_topology_mgr_resume(mgr, true);
if (ret < 0) {
drm_dp_mst_topology_mgr_set_mst(mgr, false);
need_hotplug = true;
}
}
}
-
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ drm_connector_list_iter_end(&iter);
if (need_hotplug)
drm_kms_helper_hotplug_event(dev);
@@ -989,7 +1043,7 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
/**
* dm_hw_init() - Initialize DC device
- * @handle: The base driver device containing the amdpgu_dm device.
+ * @handle: The base driver device containing the amdgpu_dm device.
*
* Initialize the &struct amdgpu_display_manager device. This involves calling
* the initializers of each DM component, then populating the struct with them.
@@ -1019,7 +1073,7 @@ static int dm_hw_init(void *handle)
/**
* dm_hw_fini() - Teardown DC device
- * @handle: The base driver device containing the amdpgu_dm device.
+ * @handle: The base driver device containing the amdgpu_dm device.
*
* Teardown components within &struct amdgpu_display_manager that require
* cleanup. This involves cleaning up the DRM device, DC, and any modules that
@@ -1163,6 +1217,7 @@ static int dm_resume(void *handle)
struct amdgpu_display_manager *dm = &adev->dm;
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
struct dm_crtc_state *dm_new_crtc_state;
@@ -1185,17 +1240,18 @@ static int dm_resume(void *handle)
/* program HPD filter */
dc_resume(dm->dc);
- /* On resume we need to rewrite the MSTM control bits to enamble MST*/
- s3_handle_mst(ddev, false);
-
/*
* early enable HPD Rx IRQ, should be done before set mode as short
* pulse interrupts are used for MST
*/
amdgpu_dm_irq_resume_early(adev);
+ /* On resume we need to rewrite the MSTM control bits to enable MST*/
+ s3_handle_mst(ddev, false);
+
/* Do detection*/
- list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
/*
@@ -1223,6 +1279,7 @@ static int dm_resume(void *handle)
amdgpu_dm_update_connector_after_detect(aconnector);
mutex_unlock(&aconnector->hpd_lock);
}
+ drm_connector_list_iter_end(&iter);
/* Force mode set in atomic commit */
for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
@@ -1438,6 +1495,11 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ /* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
+ if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+#endif
}
mutex_unlock(&dev->mode_config.mutex);
@@ -1452,6 +1514,9 @@ static void handle_hpd_irq(void *param)
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct amdgpu_device *adev = dev->dev_private;
+#endif
/*
* In case of failure or MST no need to update connector status or notify the OS
@@ -1459,6 +1524,10 @@ static void handle_hpd_irq(void *param)
*/
mutex_lock(&aconnector->hpd_lock);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->asic_type >= CHIP_RAVEN)
+ hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+#endif
if (aconnector->fake_enable)
aconnector->fake_enable = false;
@@ -1577,6 +1646,12 @@ static void handle_hpd_rx_irq(void *param)
struct dc_link *dc_link = aconnector->dc_link;
bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
enum dc_connection_type new_connection_type = dc_connection_none;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ union hpd_irq_data hpd_irq_data;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
+#endif
/*
* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
@@ -1586,7 +1661,12 @@ static void handle_hpd_rx_irq(void *param)
if (dc_link->type != dc_connection_mst_branch)
mutex_lock(&aconnector->hpd_lock);
+
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
+#else
if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
+#endif
!is_mst_root_connector) {
/* Downstream Port status changed. */
if (!dc_link_detect_sink(dc_link, &new_connection_type))
@@ -1621,6 +1701,10 @@ static void handle_hpd_rx_irq(void *param)
drm_kms_helper_hotplug_event(dev);
}
}
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ)
+ hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
+#endif
if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
(dc_link->type == dc_connection_mst_branch))
dm_handle_hpd_rx_irq(aconnector);
@@ -3311,8 +3395,9 @@ static void fill_stream_properties_from_drm_display_mode(
{
struct dc_crtc_timing *timing_out = &stream->timing;
const struct drm_display_info *info = &connector->display_info;
-
- memset(timing_out, 0, sizeof(struct dc_crtc_timing));
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct hdmi_vendor_infoframe hv_frame;
+ struct hdmi_avi_infoframe avi_frame;
timing_out->h_border_left = 0;
timing_out->h_border_right = 0;
@@ -3322,6 +3407,9 @@ static void fill_stream_properties_from_drm_display_mode(
if (drm_mode_is_420_only(info, mode_in)
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if (drm_mode_is_420_also(info, mode_in)
+ && aconnector->force_yuv420_output)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
@@ -3346,6 +3434,13 @@ static void fill_stream_properties_from_drm_display_mode(
timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
}
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
+ timing_out->vic = avi_frame.video_code;
+ drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
+ timing_out->hdmi_vic = hv_frame.vic;
+ }
+
timing_out->h_addressable = mode_in->crtc_hdisplay;
timing_out->h_total = mode_in->crtc_htotal;
timing_out->h_sync_width =
@@ -3566,6 +3661,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
stream->dm_stream_context = aconnector;
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE =
+ drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
+
list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
/* Search for preferred mode */
if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
@@ -3621,8 +3719,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
dc_link_get_link_cap(aconnector->dc_link));
if (dsc_caps.is_dsc_supported)
- if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc,
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
&dsc_caps,
+ aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
link_bandwidth_kbps,
&stream->timing,
&stream->timing.dsc_cfg))
@@ -3639,6 +3738,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
update_stream_signal(stream, sink);
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
+
finish:
dc_sink_release(sink);
@@ -4494,7 +4596,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
tv.num_shared = 1;
list_add(&tv.head, &list);
- r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL, true);
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
if (r) {
dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
return r;
@@ -5088,6 +5190,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
drm_connector_attach_vrr_capable_property(
&aconnector->base);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->asic_type >= CHIP_RAVEN)
+ drm_connector_attach_content_protection_property(&aconnector->base, false);
+#endif
}
}
@@ -5330,6 +5436,53 @@ is_scaling_state_different(const struct dm_connector_state *dm_state,
return false;
}
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+static bool is_content_protection_different(struct drm_connector_state *state,
+ const struct drm_connector_state *old_state,
+ const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ /* CP is being re-enabled; ignore this */
+ if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+ state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ return false;
+ }
+
+ /* S3 resume case: the old state is always 0 (UNDESIRED) and the restored state will be ENABLED */
+ if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
+ state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+
+ /* Check that something is connected and enabled; otherwise we would
+ * start HDCP with nothing attached (hot-plug, headless S3, DPMS).
+ */
+ if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
+ aconnector->dc_sink != NULL)
+ return true;
+
+ if (old_state->content_protection == state->content_protection)
+ return false;
+
+ if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ return true;
+
+ return false;
+}
+
+static void update_content_protection(struct drm_connector_state *state, const struct drm_connector *connector,
+ struct hdcp_workqueue *hdcp_w)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ hdcp_add_display(hdcp_w, aconnector->dc_link->link_index, aconnector);
+ else if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ hdcp_remove_display(hdcp_w, aconnector->dc_link->link_index, aconnector->base.index);
+
+}
+#endif
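In sketch form, the content-protection transitions the two helpers above implement (state names from drm_hdcp.h):

	ENABLED   -> DESIRED                     keep ENABLED, no action (re-enable in flight)
	UNDESIRED -> ENABLED   (S3 restore)      downgraded to DESIRED so authentication re-runs
	    *     -> DESIRED   (sink + DPMS on)  hdcp_add_display()
	    *     -> UNDESIRED                   hdcp_remove_display()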
static void remove_stream(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dc_stream_state *stream)
@@ -5870,6 +6023,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
/* Update the planes if changed or disable if we don't have any. */
if ((planes_count || acrtc_state->active_planes == 0) &&
acrtc_state->stream) {
+ bundle->stream_update.stream = acrtc_state->stream;
if (new_pcrtc_state->mode_changed) {
bundle->stream_update.src = acrtc_state->stream->src;
bundle->stream_update.dst = acrtc_state->stream->dst;
@@ -6254,6 +6408,30 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
acrtc->otg_inst = status->primary_otg_inst;
}
}
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ new_crtc_state = NULL;
+
+ if (acrtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
+ connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+ new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ continue;
+ }
+
+ if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
+ update_content_protection(new_con_state, connector, adev->dm.hdcp_workqueue);
+ }
+#endif
/* Handle connector state changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -6293,9 +6471,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (!scaling_changed && !abm_changed && !hdr_changed)
continue;
+ stream_update.stream = dm_new_crtc_state->stream;
if (scaling_changed) {
update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
- dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
+ dm_new_con_state, dm_new_crtc_state->stream);
stream_update.src = dm_new_crtc_state->stream->src;
stream_update.dst = dm_new_crtc_state->stream->dst;
@@ -7164,7 +7343,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
status = dc_stream_get_status_from_state(old_dm_state->context,
new_dm_crtc_state->stream);
-
+ stream_update.stream = new_dm_crtc_state->stream;
/*
* TODO: DC modifies the surface during this call so we need
* to lock here - find a way to do this without locking.
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index c8c525a2b505..77c5166e6b08 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -108,6 +108,12 @@ struct amdgpu_dm_backlight_caps {
* @display_indexes_num: Max number of display streams supported
* @irq_handler_list_table_lock: Synchronizes access to IRQ tables
* @backlight_dev: Backlight control device
+ * @backlight_link: Link on which to control backlight
+ * @backlight_caps: Capabilities of the backlight device
+ * @freesync_module: Module handling freesync calculations
+ * @fw_dmcu: Reference to DMCU firmware
+ * @dmcu_fw_version: Version of the DMCU firmware
+ * @soc_bounding_box: SOC bounding box values provided by gpu_info FW
* @cached_state: Caches device atomic state for suspend/resume
* @compressor: Frame buffer compression buffer. See &struct dm_comressor_info
*/
@@ -128,7 +134,7 @@ struct amdgpu_display_manager {
u16 display_indexes_num;
/**
- * @atomic_obj
+ * @atomic_obj:
*
* In combination with &dm_atomic_state it helps manage
* global atomic state that doesn't map cleanly into existing
@@ -225,6 +231,9 @@ struct amdgpu_display_manager {
struct amdgpu_dm_backlight_caps backlight_caps;
struct mod_freesync *freesync_module;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct hdcp_workqueue *hdcp_workqueue;
+#endif
struct drm_atomic_state *cached_state;
@@ -234,6 +243,8 @@ struct amdgpu_display_manager {
uint32_t dmcu_fw_version;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
/**
+ * @soc_bounding_box:
+ *
* gpu_info FW provided soc bounding box struct or 0 if not
* available in FW
*/
@@ -287,6 +298,7 @@ struct amdgpu_dm_connector {
uint32_t debugfs_dpcd_address;
uint32_t debugfs_dpcd_size;
#endif
+ bool force_yuv420_output;
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index a549c7c717dd..eaad9099bc0b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -122,11 +122,16 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
}
/* Configure dithering */
- if (!dm_need_crc_dither(source))
+ if (!dm_need_crc_dither(source)) {
dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8);
- else
+ dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
+ DYN_EXPANSION_DISABLE);
+ } else {
dc_stream_set_dither_option(stream_state,
DITHER_OPTION_DEFAULT);
+ dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
+ DYN_EXPANSION_AUTO);
+ }
unlock:
mutex_unlock(&adev->dm.dc_lock);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index f3dfb2887ae0..e29c6314f98c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -942,6 +942,33 @@ static const struct {
{"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}
};
+/*
+ * Force YUV420 output if available from the given mode
+ */
+static int force_yuv420_output_set(void *data, u64 val)
+{
+ struct amdgpu_dm_connector *connector = data;
+
+ connector->force_yuv420_output = (bool)val;
+
+ return 0;
+}
+
+/*
+ * Report whether YUV420 output is currently being forced
+ */
+static int force_yuv420_output_get(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *connector = data;
+
+ *val = connector->force_yuv420_output;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(force_yuv420_output_fops, force_yuv420_output_get,
+ force_yuv420_output_set, "%llu\n");
+
void connector_debugfs_init(struct amdgpu_dm_connector *connector)
{
int i;
@@ -955,6 +982,10 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
dp_debugfs_entries[i].fops);
}
}
+
+ debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
+ &force_yuv420_output_fops);
+
}
/*
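
The pair of callbacks plus DEFINE_DEBUGFS_ATTRIBUTE() above is the stock kernel recipe for a u64-backed debugfs knob, and debugfs_create_file_unsafe() is the matching registration call (the attribute helpers are removal-safe, so the heavier proxying of debugfs_create_file() is skipped). Userspace would toggle the new knob via something like /sys/kernel/debug/dri/<minor>/<connector>/force_yuv420_output. A minimal self-contained sketch of the same pattern, with illustrative module and attribute names that are not part of this patch:

#include <linux/debugfs.h>
#include <linux/module.h>

static bool demo_flag;
static struct dentry *demo_dir;

static int demo_flag_set(void *data, u64 val)
{
	*(bool *)data = (bool)val;
	return 0;
}

static int demo_flag_get(void *data, u64 *val)
{
	*val = *(bool *)data;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(demo_flag_fops, demo_flag_get, demo_flag_set, "%llu\n");

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	debugfs_create_file_unsafe("flag", 0644, demo_dir, &demo_flag,
				   &demo_flag_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
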
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
new file mode 100644
index 000000000000..77181ddf6c8e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "amdgpu_dm_hdcp.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "dm_helpers.h"
+#include <drm/drm_hdcp.h>
+
+static bool
+lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
+{
+ struct dc_link *link = handle;
+ struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} };
+ struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz};
+
+ return dm_helpers_submit_i2c(link->ctx, link, &cmd);
+}
+
+static bool
+lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint32_t size)
+{
+ struct dc_link *link = handle;
+
+ struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} };
+ struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz};
+
+ return dm_helpers_submit_i2c(link->ctx, link, &cmd);
+}
+
+static bool
+lp_write_dpcd(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
+{
+ struct dc_link *link = handle;
+
+ return dm_helpers_dp_write_dpcd(link->ctx, link, address, data, size);
+}
+
+static bool
+lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
+{
+ struct dc_link *link = handle;
+
+ return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
+}
+
+static void process_output(struct hdcp_workqueue *hdcp_work)
+{
+ struct mod_hdcp_output output = hdcp_work->output;
+
+ if (output.callback_stop)
+ cancel_delayed_work(&hdcp_work->callback_dwork);
+
+ if (output.callback_needed)
+ schedule_delayed_work(&hdcp_work->callback_dwork,
+ msecs_to_jiffies(output.callback_delay));
+
+ if (output.watchdog_timer_stop)
+ cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
+
+ if (output.watchdog_timer_needed)
+ schedule_delayed_work(&hdcp_work->watchdog_timer_dwork,
+ msecs_to_jiffies(output.watchdog_timer_delay));
+}
+
+void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+ struct mod_hdcp_display *display = &hdcp_work[link_index].display;
+ struct mod_hdcp_link *link = &hdcp_work[link_index].link;
+
+ mutex_lock(&hdcp_w->mutex);
+ hdcp_w->aconnector = aconnector;
+
+ mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
+
+ schedule_delayed_work(&hdcp_w->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
+
+ process_output(hdcp_w);
+
+ mutex_unlock(&hdcp_w->mutex);
+}
+
+void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, unsigned int display_index)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+
+ mutex_lock(&hdcp_w->mutex);
+
+ mod_hdcp_remove_display(&hdcp_w->hdcp, display_index, &hdcp_w->output);
+
+ cancel_delayed_work(&hdcp_w->property_validate_dwork);
+ hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ process_output(hdcp_w);
+
+ mutex_unlock(&hdcp_w->mutex);
+}
+
+void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+
+ mutex_lock(&hdcp_w->mutex);
+
+ mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output);
+
+ cancel_delayed_work(&hdcp_w->property_validate_dwork);
+ hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ process_output(hdcp_w);
+
+ mutex_unlock(&hdcp_w->mutex);
+}
+
+void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+
+ schedule_work(&hdcp_w->cpirq_work);
+}
+
+static void event_callback(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work;
+
+ hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
+ callback_dwork);
+
+ mutex_lock(&hdcp_work->mutex);
+
+ cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
+
+ mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK,
+ &hdcp_work->output);
+
+ process_output(hdcp_work);
+
+ mutex_unlock(&hdcp_work->mutex);
+}
+
+static void event_property_update(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work);
+ struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
+ struct drm_device *dev = hdcp_work->aconnector->base.dev;
+ long ret;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ mutex_lock(&hdcp_work->mutex);
+
+ if (aconnector->base.state->commit) {
+ ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ);
+
+ if (ret == 0) {
+ DRM_ERROR("HDCP state unknown! Setting it to DESIRED\n");
+ hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ }
+ }
+
+ if (hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON)
+ drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ else
+ drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_DESIRED);
+
+ mutex_unlock(&hdcp_work->mutex);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+}
+
+static void event_property_validate(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work =
+ container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork);
+ struct mod_hdcp_display_query query;
+ struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
+
+ mutex_lock(&hdcp_work->mutex);
+
+ query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query);
+
+ if (query.encryption_status != hdcp_work->encryption_status) {
+ hdcp_work->encryption_status = query.encryption_status;
+ schedule_work(&hdcp_work->property_update_work);
+ }
+
+ schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
+
+ mutex_unlock(&hdcp_work->mutex);
+}
+
+static void event_watchdog_timer(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work;
+
+ hdcp_work = container_of(to_delayed_work(work),
+ struct hdcp_workqueue,
+ watchdog_timer_dwork);
+
+ mutex_lock(&hdcp_work->mutex);
+
+ mod_hdcp_process_event(&hdcp_work->hdcp,
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT,
+ &hdcp_work->output);
+
+ process_output(hdcp_work);
+
+ mutex_unlock(&hdcp_work->mutex);
+}
+
+static void event_cpirq(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work;
+
+ hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work);
+
+ mutex_lock(&hdcp_work->mutex);
+
+ mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output);
+
+ process_output(hdcp_work);
+
+ mutex_unlock(&hdcp_work->mutex);
+}
+
+void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
+{
+ int i = 0;
+
+ for (i = 0; i < hdcp_work->max_link; i++) {
+ cancel_delayed_work_sync(&hdcp_work[i].callback_dwork);
+ cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
+ }
+
+ kfree(hdcp_work);
+}
+
+static void update_config(void *handle, struct cp_psp_stream_config *config)
+{
+ struct hdcp_workqueue *hdcp_work = handle;
+ struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx;
+ int link_index = aconnector->dc_link->link_index;
+ struct mod_hdcp_display *display = &hdcp_work[link_index].display;
+ struct mod_hdcp_link *link = &hdcp_work[link_index].link;
+
+ memset(display, 0, sizeof(*display));
+ memset(link, 0, sizeof(*link));
+
+ display->index = aconnector->base.index;
+ display->state = MOD_HDCP_DISPLAY_ACTIVE;
+
+ if (aconnector->dc_sink != NULL)
+ link->mode = mod_hdcp_signal_type_to_operation_mode(aconnector->dc_sink->sink_signal);
+
+ display->controller = CONTROLLER_ID_D0 + config->otg_inst;
+ display->dig_fe = config->stream_enc_inst;
+ link->dig_be = config->link_enc_inst;
+ link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
+ link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
+ link->adjust.hdcp2.disable = 1;
+}
+
+struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc)
+{
+ int max_caps = dc->caps.max_links;
+ struct hdcp_workqueue *hdcp_work = kcalloc(max_caps, sizeof(*hdcp_work), GFP_KERNEL);
+ int i = 0;
+
+ if (hdcp_work == NULL)
+ goto fail_alloc_context;
+
+ hdcp_work->max_link = max_caps;
+
+ for (i = 0; i < max_caps; i++) {
+ mutex_init(&hdcp_work[i].mutex);
+
+ INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq);
+ INIT_WORK(&hdcp_work[i].property_update_work, event_property_update);
+ INIT_DELAYED_WORK(&hdcp_work[i].callback_dwork, event_callback);
+ INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer);
+ INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);
+
+ hdcp_work[i].hdcp.config.psp.handle = psp_context;
+ hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
+ hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
+ hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
+ hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd;
+ hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd;
+ }
+
+ cp_psp->funcs.update_stream_config = update_config;
+ cp_psp->handle = hdcp_work;
+
+ return hdcp_work;
+
+fail_alloc_context:
+ kfree(hdcp_work);
+
+ return NULL;
+}
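
Every event handler in the new file follows the same deferred-work shape: recover the per-link context with container_of() (via to_delayed_work() for the timed events), take the link mutex, feed one event into mod_hdcp, then let process_output() re-arm or cancel the delayed work items. A stripped-down, self-contained sketch of that shape, with demo names that are not from the patch:

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/workqueue.h>

struct demo_ctx {
	struct delayed_work dwork;
	int payload;
};

static struct demo_ctx demo;

static void demo_handler(struct work_struct *work)
{
	/* to_delayed_work() + container_of() recover the owning context,
	 * the same way event_callback() finds its hdcp_workqueue */
	struct demo_ctx *ctx = container_of(to_delayed_work(work),
					    struct demo_ctx, dwork);

	pr_info("demo payload = %d\n", ctx->payload);
}

static int __init demo_init(void)
{
	demo.payload = 42;
	INIT_DELAYED_WORK(&demo.dwork, demo_handler);
	schedule_delayed_work(&demo.dwork, msecs_to_jiffies(100));
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo.dwork);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
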
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
new file mode 100644
index 000000000000..d3ba505d0696
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef AMDGPU_DM_AMDGPU_DM_HDCP_H_
+#define AMDGPU_DM_AMDGPU_DM_HDCP_H_
+
+#include "mod_hdcp.h"
+#include "hdcp.h"
+#include "dc.h"
+#include "dm_cp_psp.h"
+
+struct mod_hdcp;
+struct mod_hdcp_link;
+struct mod_hdcp_display;
+struct cp_psp;
+
+struct hdcp_workqueue {
+ struct work_struct cpirq_work;
+ struct work_struct property_update_work;
+ struct delayed_work callback_dwork;
+ struct delayed_work watchdog_timer_dwork;
+ struct delayed_work property_validate_dwork;
+ struct amdgpu_dm_connector *aconnector;
+ struct mutex mutex;
+
+ struct mod_hdcp hdcp;
+ struct mod_hdcp_output output;
+ struct mod_hdcp_display display;
+ struct mod_hdcp_link link;
+
+ enum mod_hdcp_encryption_status encryption_status;
+ uint8_t max_link;
+};
+
+void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index,
+ struct amdgpu_dm_connector *aconnector);
+void hdcp_remove_display(struct hdcp_workqueue *work, unsigned int link_index, unsigned int display_index);
+void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
+void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
+void hdcp_destroy(struct hdcp_workqueue *work);
+
+struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc);
+
+#endif /* AMDGPU_DM_AMDGPU_DM_HDCP_H_ */
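
Note the ownership model the header implies: hdcp_create_workqueue() hands back an array with one hdcp_workqueue per DDC link, each slot carrying its own mutex, work items, and mod_hdcp state, so nothing is shared across links. A hypothetical caller-side fragment (variable names assumed, not from the patch):

	struct hdcp_workqueue *slot = &hdcp_work[aconnector->dc_link->link_index];

	mutex_lock(&slot->mutex);
	/* mutate slot->display / slot->link for this link only */
	mutex_unlock(&slot->mutex);
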
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index ee1dc75f5ddc..11e5784aa62a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -97,11 +97,10 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
(struct edid *) edid->raw_edid);
sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
- if (sad_count <= 0) {
- DRM_INFO("SADs count is: %d, don't need to read it\n",
- sad_count);
+ if (sad_count < 0)
+ DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return result;
- }
edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
for (i = 0; i < edid_caps->audio_mode_count; ++i) {
@@ -282,7 +281,7 @@ void dm_helpers_dp_mst_clear_payload_allocation_table(
* Polls for ACT (allocation change trigger) handled and sends
* ALLOCATE_PAYLOAD message.
*/
-bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
struct dc_context *ctx,
const struct dc_stream_state *stream)
{
@@ -293,19 +292,19 @@ bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
if (!aconnector || !aconnector->mst_port)
- return false;
+ return ACT_FAILED;
mst_mgr = &aconnector->mst_port->mst_mgr;
if (!mst_mgr->mst_state)
- return false;
+ return ACT_FAILED;
ret = drm_dp_check_act_status(mst_mgr);
if (ret)
- return false;
+ return ACT_FAILED;
- return true;
+ return ACT_SUCCESS;
}
bool dm_helpers_dp_mst_send_payload_allocation(
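
The poll helper now reports an enum act_return_status instead of a bool, leaving room for callers to distinguish failure modes. Only ACT_SUCCESS and ACT_FAILED are visible in this hunk; a minimal compatible definition (an assumption on our part, the real enum presumably lives in the dm_helpers header and may carry more values) would be:

enum act_return_status {
	ACT_SUCCESS,
	ACT_FAILED,
};
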
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index fa5d503d379c..64445c4cc4c2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -732,8 +732,10 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
@@ -751,6 +753,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
true);
}
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -765,8 +768,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
@@ -779,4 +784,5 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
false);
}
}
+ drm_connector_list_iter_end(&iter);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 5ec14efd4d8c..3c1f8258291c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -123,31 +123,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
return result;
}
-static enum drm_connector_status
-dm_dp_mst_detect(struct drm_connector *connector, bool force)
-{
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct amdgpu_dm_connector *master = aconnector->mst_port;
-
- enum drm_connector_status status =
- drm_dp_mst_detect_port(
- connector,
- &master->mst_mgr,
- aconnector->port);
-
- return status;
-}
-
static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
- if (amdgpu_dm_connector->edid) {
- kfree(amdgpu_dm_connector->edid);
- amdgpu_dm_connector->edid = NULL;
- }
+ kfree(amdgpu_dm_connector->edid);
+ amdgpu_dm_connector->edid = NULL;
drm_encoder_cleanup(&amdgpu_encoder->base);
kfree(amdgpu_encoder);
@@ -177,7 +160,6 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
}
static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
- .detect = dm_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = dm_dp_mst_connector_destroy,
.reset = amdgpu_dm_connector_funcs_reset,
@@ -245,17 +227,29 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
return ret;
}
-static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector)
+static struct drm_encoder *
+dm_mst_atomic_best_encoder(struct drm_connector *connector,
+ struct drm_connector_state *connector_state)
{
- struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+ return &to_amdgpu_dm_connector(connector)->mst_encoder->base;
+}
+
+static int
+dm_dp_mst_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_dm_connector *master = aconnector->mst_port;
- return &amdgpu_dm_connector->mst_encoder->base;
+ return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
+ aconnector->port);
}
static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
.get_modes = dm_dp_mst_get_modes,
.mode_valid = amdgpu_dm_connector_mode_valid,
- .best_encoder = dm_mst_best_encoder,
+ .atomic_best_encoder = dm_mst_atomic_best_encoder,
+ .detect_ctx = dm_dp_mst_detect,
};
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
@@ -417,6 +411,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
&aconnector->base);
+
+ if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
+ return;
+
aconnector->mst_mgr.cbs = &dm_mst_cbs;
drm_dp_mst_topology_mgr_init(
&aconnector->mst_mgr,
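
This hunk moves the connector from the legacy .detect/.best_encoder hooks to the locking-aware .detect_ctx/.atomic_best_encoder helpers: drm_dp_mst_detect_port() now takes the caller's drm_modeset_acquire_ctx, so a probe can return -EDEADLK and be retried by the probe helpers instead of deadlocking. A hedged sketch of the detect_ctx contract (demo_hw_probe is a hypothetical stand-in for the hardware query):

static int demo_detect_ctx(struct drm_connector *connector,
			   struct drm_modeset_acquire_ctx *ctx, bool force)
{
	int ret = demo_hw_probe(connector, ctx); /* may return -EDEADLK */

	if (ret < 0)
		return ret;	/* probe helpers back off and retry */

	return ret ? connector_status_connected :
		     connector_status_disconnected;
}
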
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index f4cfa0caeba8..7add40dea9b7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -589,10 +589,9 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
&wm_with_clock_ranges);
- else if (adev->smu.funcs &&
- adev->smu.funcs->set_watermarks_for_clock_ranges)
+ else
smu_set_watermarks_for_clock_ranges(&adev->smu,
- &wm_with_clock_ranges);
+ &wm_with_clock_ranges);
}
void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
@@ -665,7 +664,6 @@ enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
- struct smu_context *smu = &adev->smu;
struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
wm_with_clock_ranges.wm_dmif_clocks_ranges;
@@ -708,15 +706,7 @@ enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
}
- if (!smu->funcs)
- return PP_SMU_RESULT_UNSUPPORTED;
-
- /* 0: successful or smu.funcs->set_watermarks_for_clock_ranges = NULL;
- * 1: fail
- */
- if (smu_set_watermarks_for_clock_ranges(&adev->smu,
- &wm_with_clock_ranges))
- return PP_SMU_RESULT_UNSUPPORTED;
+ smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);
return PP_SMU_RESULT_OK;
}
@@ -901,6 +891,90 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
return PP_SMU_RESULT_FAIL;
}
+#ifdef CONFIG_DRM_AMD_DC_DCN2_1
+enum pp_smu_status pp_rn_get_dpm_clock_table(
+ struct pp_smu *pp, struct dpm_clocks *clock_table)
+{
+ const struct dc_context *ctx = pp->dm;
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct smu_context *smu = &adev->smu;
+
+ if (!smu->ppt_funcs)
+ return PP_SMU_RESULT_UNSUPPORTED;
+
+ if (!smu->ppt_funcs->get_dpm_clock_table)
+ return PP_SMU_RESULT_UNSUPPORTED;
+
+ if (!smu->ppt_funcs->get_dpm_clock_table(smu, clock_table))
+ return PP_SMU_RESULT_OK;
+
+ return PP_SMU_RESULT_FAIL;
+}
+
+enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
+ struct pp_smu_wm_range_sets *ranges)
+{
+ const struct dc_context *ctx = pp->dm;
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct smu_context *smu = &adev->smu;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
+ struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
+ wm_with_clock_ranges.wm_dmif_clocks_ranges;
+ struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
+ wm_with_clock_ranges.wm_mcif_clocks_ranges;
+ int32_t i;
+
+ if (!smu->funcs)
+ return PP_SMU_RESULT_UNSUPPORTED;
+
+ wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
+ wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
+
+ for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
+ if (ranges->reader_wm_sets[i].wm_inst > 3)
+ wm_dce_clocks[i].wm_set_id = WM_SET_A;
+ else
+ wm_dce_clocks[i].wm_set_id =
+ ranges->reader_wm_sets[i].wm_inst;
+
+ wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
+ ranges->reader_wm_sets[i].min_drain_clk_mhz;
+
+ wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
+ ranges->reader_wm_sets[i].max_drain_clk_mhz;
+
+ wm_dce_clocks[i].wm_min_mem_clk_in_khz =
+ ranges->reader_wm_sets[i].min_fill_clk_mhz;
+
+ wm_dce_clocks[i].wm_max_mem_clk_in_khz =
+ ranges->reader_wm_sets[i].max_fill_clk_mhz;
+ }
+
+ for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
+ if (ranges->writer_wm_sets[i].wm_inst > 3)
+ wm_soc_clocks[i].wm_set_id = WM_SET_A;
+ else
+ wm_soc_clocks[i].wm_set_id =
+ ranges->writer_wm_sets[i].wm_inst;
+ wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].min_fill_clk_mhz;
+
+ wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].max_fill_clk_mhz;
+
+ wm_soc_clocks[i].wm_min_mem_clk_in_khz =
+ ranges->writer_wm_sets[i].min_drain_clk_mhz;
+
+ wm_soc_clocks[i].wm_max_mem_clk_in_khz =
+ ranges->writer_wm_sets[i].max_drain_clk_mhz;
+ }
+
+ smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);
+
+ return PP_SMU_RESULT_OK;
+}
+#endif
+
void dm_pp_get_funcs(
struct dc_context *ctx,
struct pp_smu_funcs *funcs)
@@ -945,6 +1019,15 @@ void dm_pp_get_funcs(
funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
break;
#endif
+
+#ifdef CONFIG_DRM_AMD_DC_DCN2_1
+ case DCN_VERSION_2_1:
+ funcs->ctx.ver = PP_SMU_VER_RN;
+ funcs->rn_funcs.pp_smu.dm = ctx;
+ funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges;
+ funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table;
+ break;
+#endif
default:
DRM_ERROR("smu version is not supported !\n");
break;
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 627982cb15d2..a160512a2f04 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -48,6 +48,10 @@ DC_LIBS += dce110
DC_LIBS += dce100
DC_LIBS += dce80
+ifdef CONFIG_DRM_AMD_DC_HDCP
+DC_LIBS += hdcp
+endif
+
AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
include $(AMD_DC)
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 221e0f56389f..823843cd2613 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -2543,7 +2543,6 @@ static enum bp_result construct_integrated_info(
/* Sort voltage table from low to high*/
if (result == BP_RESULT_OK) {
- struct clock_voltage_caps temp = {0, 0};
uint32_t i;
uint32_t j;
@@ -2553,10 +2552,8 @@ static enum bp_result construct_integrated_info(
info->disp_clk_voltage[j].max_supported_clk <
info->disp_clk_voltage[j-1].max_supported_clk) {
/* swap j and j - 1*/
- temp = info->disp_clk_voltage[j-1];
- info->disp_clk_voltage[j-1] =
- info->disp_clk_voltage[j];
- info->disp_clk_voltage[j] = temp;
+ swap(info->disp_clk_voltage[j - 1],
+ info->disp_clk_voltage[j]);
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index dff65c0fe82f..7873abea4112 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1613,8 +1613,6 @@ static enum bp_result construct_integrated_info(
struct atom_common_table_header *header;
struct atom_data_revision revision;
-
- struct clock_voltage_caps temp = {0, 0};
uint32_t i;
uint32_t j;
@@ -1644,10 +1642,8 @@ static enum bp_result construct_integrated_info(
info->disp_clk_voltage[j-1].max_supported_clk
) {
/* swap j and j - 1*/
- temp = info->disp_clk_voltage[j-1];
- info->disp_clk_voltage[j-1] =
- info->disp_clk_voltage[j];
- info->disp_clk_voltage[j] = temp;
+ swap(info->disp_clk_voltage[j - 1],
+ info->disp_clk_voltage[j]);
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index c43797bea413..8828dd9c3783 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -65,6 +65,31 @@ int clk_mgr_helper_get_active_display_cnt(
return display_count;
}
+void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
+{
+ struct dc_link *edp_link = get_edp_link(dc);
+
+ if (dc->hwss.exit_optimized_pwr_state)
+ dc->hwss.exit_optimized_pwr_state(dc, dc->current_state);
+
+ if (edp_link) {
+ clk_mgr->psr_allow_active_cache = edp_link->psr_allow_active;
+ dc_link_set_psr_allow_active(edp_link, false, false);
+ }
+
+}
+
+void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
+{
+ struct dc_link *edp_link = get_edp_link(dc);
+
+ if (edp_link)
+ dc_link_set_psr_allow_active(edp_link, clk_mgr->psr_allow_active_cache, false);
+
+ if (dc->hwss.optimize_pwr_state)
+ dc->hwss.optimize_pwr_state(dc, dc->current_state);
+
+}
struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg)
{
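
The two helpers added above are meant to bracket clock reprogramming: the exit helper caches psr_allow_active and parks PSR (plus any hardware-specific exit hook), and the optimize helper restores both afterwards. An illustrative pairing, not lifted from a real call site:

	clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
	/* ... reprogram clocks while PSR is parked ... */
	clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
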
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index 47f529ce280a..5b3d36d41822 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@ -139,6 +139,9 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
ASSERT(clk_mgr->pp_smu);
+ if (dc->work_arounds.skip_clock_update)
+ return;
+
pp_smu = &clk_mgr->pp_smu->rv_funcs;
display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 3e8ac303bd52..ecd2cb4840e3 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -104,6 +104,7 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
{
int i;
+ clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
int dpp_inst, dppclk_khz;
@@ -113,75 +114,28 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
clk_mgr->dccg->funcs->update_dpp_dto(
- clk_mgr->dccg, dpp_inst, dppclk_khz, false);
+ clk_mgr->dccg, dpp_inst, dppclk_khz);
}
}
-static void update_global_dpp_clk(struct clk_mgr_internal *clk_mgr, unsigned int khz)
+void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr)
{
int dpp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->dentist_vco_freq_khz / khz;
-
- uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider);
-
- REG_UPDATE(DENTIST_DISPCLK_CNTL,
- DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
- REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
-}
-
-static void update_display_clk(struct clk_mgr_internal *clk_mgr, unsigned int khz)
-{
+ * clk_mgr->dentist_vco_freq_khz / clk_mgr->base.clks.dppclk_khz;
int disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->dentist_vco_freq_khz / khz;
+ * clk_mgr->dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz;
+ uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider);
uint32_t dispclk_wdivider = dentist_get_did_from_divider(disp_divider);
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
+// REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 5, 100);
+ REG_UPDATE(DENTIST_DISPCLK_CNTL,
+ DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
}
-static void request_voltage_and_program_disp_clk(struct clk_mgr *clk_mgr_base, unsigned int khz)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- struct dc *dc = clk_mgr_base->ctx->dc;
- struct pp_smu_funcs_nv *pp_smu = NULL;
- bool going_up = clk_mgr->base.clks.dispclk_khz < khz;
-
- if (dc->res_pool->pp_smu)
- pp_smu = &dc->res_pool->pp_smu->nv_funcs;
-
- clk_mgr->base.clks.dispclk_khz = khz;
-
- if (going_up && pp_smu && pp_smu->set_voltage_by_freq)
- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
-
- update_display_clk(clk_mgr, khz);
-
- if (!going_up && pp_smu && pp_smu->set_voltage_by_freq)
- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
-}
-
-static void request_voltage_and_program_global_dpp_clk(struct clk_mgr *clk_mgr_base, unsigned int khz)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- struct dc *dc = clk_mgr_base->ctx->dc;
- struct pp_smu_funcs_nv *pp_smu = NULL;
- bool going_up = clk_mgr->base.clks.dppclk_khz < khz;
-
- if (dc->res_pool->pp_smu)
- pp_smu = &dc->res_pool->pp_smu->nv_funcs;
-
- clk_mgr->base.clks.dppclk_khz = khz;
- clk_mgr->dccg->ref_dppclk = khz;
-
- if (going_up && pp_smu && pp_smu->set_voltage_by_freq)
- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000);
-
- update_global_dpp_clk(clk_mgr, khz);
-
- if (!going_up && pp_smu && pp_smu->set_voltage_by_freq)
- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000);
-}
void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
@@ -192,11 +146,12 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc *dc = clk_mgr_base->ctx->dc;
struct pp_smu_funcs_nv *pp_smu = NULL;
int display_count;
+ bool update_dppclk = false;
bool update_dispclk = false;
bool enter_display_off = false;
+ bool dpp_clock_lowered = false;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
bool force_reset = false;
- int i;
if (dc->work_arounds.skip_clock_update)
return;
@@ -251,12 +206,10 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
-
clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
if (pp_smu && pp_smu->set_pstate_handshake_support)
pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support);
}
- clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {
clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
@@ -264,48 +217,35 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
pp_smu->set_hard_min_uclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.dramclk_khz / 1000);
}
- if (dc->config.forced_clocks == false) {
- // First update display clock
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz))
- request_voltage_and_program_disp_clk(clk_mgr_base, new_clocks->dispclk_khz);
-
- // Updating DPP clock requires some more logic
- if (!safe_to_lower) {
- // For pre-programming, we need to make sure any DPP clock that will go up has to go up
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
+ if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
+ dpp_clock_lowered = true;
+ clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz;
- // First raise the global reference if needed
- if (new_clocks->dppclk_khz > clk_mgr_base->clks.dppclk_khz)
- request_voltage_and_program_global_dpp_clk(clk_mgr_base, new_clocks->dppclk_khz);
-
- // Then raise any dividers that need raising
- for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
- int dpp_inst, dppclk_khz;
+ if (pp_smu && pp_smu->set_voltage_by_freq)
+ pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000);
- if (!context->res_ctx.pipe_ctx[i].plane_state)
- continue;
+ update_dppclk = true;
+ }
- dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
- dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+ if (pp_smu && pp_smu->set_voltage_by_freq)
+ pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
- clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz, true);
- }
+ update_dispclk = true;
+ }
+ if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
+ if (dpp_clock_lowered) {
+ // if clock is being lowered, increase DTO before lowering refclk
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+ dcn20_update_clocks_update_dentist(clk_mgr);
} else {
- // For post-programming, we can lower ref clk if needed, and unconditionally set all the DTOs
-
- if (new_clocks->dppclk_khz < clk_mgr_base->clks.dppclk_khz)
- request_voltage_and_program_global_dpp_clk(clk_mgr_base, new_clocks->dppclk_khz);
-
- for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
- int dpp_inst, dppclk_khz;
-
- if (!context->res_ctx.pipe_ctx[i].plane_state)
- continue;
-
- dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
- dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
-
- clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz, false);
- }
+ // if clock is being raised, increase refclk before lowering DTO
+ if (update_dppclk || update_dispclk)
+ dcn20_update_clocks_update_dentist(clk_mgr);
+ if (update_dppclk)
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
}
}
if (update_dispclk &&
@@ -409,12 +349,36 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr,
}
}
+static bool dcn2_are_clock_states_equal(struct dc_clocks *a,
+ struct dc_clocks *b)
+{
+ if (a->dispclk_khz != b->dispclk_khz)
+ return false;
+ else if (a->dppclk_khz != b->dppclk_khz)
+ return false;
+ else if (a->dcfclk_khz != b->dcfclk_khz)
+ return false;
+ else if (a->socclk_khz != b->socclk_khz)
+ return false;
+ else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
+ return false;
+ else if (a->phyclk_khz != b->phyclk_khz)
+ return false;
+ else if (a->dramclk_khz != b->dramclk_khz)
+ return false;
+ else if (a->p_state_change_support != b->p_state_change_support)
+ return false;
+
+ return true;
+}
+
static struct clk_mgr_funcs dcn2_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = dcn2_update_clocks,
.init_clocks = dcn2_init_clocks,
.enable_pme_wa = dcn2_enable_pme_wa,
.get_clock = dcn2_get_clock,
+ .are_clock_states_equal = dcn2_are_clock_states_equal,
};
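
Both the old and new update paths key off should_set_clock() and the recurring safe_to_lower flag. The helper in dc's clk_mgr internals boils down to a two-pass contract: the pre-commit pass (safe_to_lower == false) only applies increases, and the post-commit pass is additionally allowed to lower clocks. Essentially:

static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
{
	/* raise immediately; lower only once it is safe to do so */
	return (safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk;
}
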
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
index ac31a9787305..c9fd824f3c23 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
@@ -50,4 +50,5 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr,
enum dc_clock_type clock_type,
struct dc_clock_config *clock_cfg);
+void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr);
#endif //__DCN20_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 787f94d815f4..b647e0320e4b 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -52,6 +52,45 @@
#define REG(reg_name) \
(CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+
+/* TODO: evaluate how to lower or disable all dcn clocks in screen off case */
+int rn_get_active_display_cnt_wa(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i, display_count;
+ bool hdmi_present = false;
+
+ display_count = 0;
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_stream_state *stream = context->streams[i];
+
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ hdmi_present = true;
+ }
+
+ for (i = 0; i < dc->link_count; i++) {
+ const struct dc_link *link = dc->links[i];
+
+ /*
+ * Only notify active stream or virtual stream.
+ * Need to notify virtual stream to work around
+ * headless case. HPD does not fire when system is in
+ * S0i2.
+ */
+ /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
+ if (link->connector_signal == SIGNAL_TYPE_VIRTUAL ||
+ link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ display_count++;
+ }
+
+ /* WA for hang on HDMI after display off then back on */
+ if (display_count == 0 && hdmi_present)
+ display_count = 1;
+
+ return display_count;
+}
+
void rn_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
@@ -62,17 +101,36 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
int display_count;
bool update_dppclk = false;
bool update_dispclk = false;
- bool enter_display_off = false;
bool dpp_clock_lowered = false;
- struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
- display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
+ struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
- if (display_count == 0)
- enter_display_off = true;
+ if (dc->work_arounds.skip_clock_update)
+ return;
- if (enter_display_off == safe_to_lower) {
- rn_vbios_smu_set_display_count(clk_mgr, display_count);
+ /*
+ * If it is safe to lower but we are already in the lower state, we don't have to do anything.
+ * If safe to lower is false, we just go to the higher state.
+ */
+ if (safe_to_lower) {
+ /* check that we're not already in lower */
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_OPTIMIZED) {
+
+ display_count = rn_get_active_display_cnt_wa(dc, context);
+ /* if we can go lower, go lower */
+ if (display_count == 0) {
+ rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_OPTIMIZED);
+ /* update power state */
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_OPTIMIZED;
+ }
+ }
+ } else {
+ /* check that we're not already in the normal state */
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_NORMAL) {
+ rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_NORMAL);
+ /* update power state */
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_NORMAL;
+ }
}
if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) {
@@ -319,7 +377,7 @@ void rn_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s)
rn_dump_clk_registers(&sb, clk_mgr_base, &log_info);
- s->dprefclk_khz = sb.dprefclk;
+ s->dprefclk_khz = sb.dprefclk * 1000;
}
void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base)
@@ -329,10 +387,19 @@ void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base)
rn_vbios_smu_enable_pme_wa(clk_mgr);
}
+void rn_init_clocks(struct clk_mgr *clk_mgr)
+{
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ // Assumption is that boot state always supports pstate
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_NORMAL;
+}
+
static struct clk_mgr_funcs dcn21_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = rn_update_clocks,
- .init_clocks = dcn2_init_clocks,
+ .init_clocks = rn_init_clocks,
.enable_pme_wa = rn_enable_pme_wa,
/* .dump_clk_registers = rn_dump_clk_registers */
};
@@ -405,7 +472,7 @@ struct clk_bw_params rn_bw_params = {
}
};
-void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
+void rn_build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
{
int i, num_valid_sets;
@@ -462,23 +529,50 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra
}
-void clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id)
+static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
{
int i;
+ for (i = 0; i < PP_SMU_NUM_DCFCLK_DPM_LEVELS; i++) {
+ if (clock_table->DcfClocks[i].Vol == voltage)
+ return clock_table->DcfClocks[i].Freq;
+ }
+
+ ASSERT(0);
+ return 0;
+}
+
+void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id)
+{
+ int i, j = -1;
+
ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL);
- for (i = 0; i < PP_SMU_NUM_FCLK_DPM_LEVELS; i++) {
- if (clock_table->FClocks[i].Freq == 0)
+ /* Find the lowest DPM level; FCLK is filled in reverse order */
+
+ for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) {
+ if (clock_table->FClocks[i].Freq != 0) {
+ j = i;
break;
+ }
+ }
+
+ if (j == -1) {
+ /* clock table is all 0s, just use our own hardcoded defaults */
+ ASSERT(0);
+ return;
+ }
+
+ bw_params->clk_table.num_entries = j + 1;
- bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i].Freq;
- bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[i].Freq;
- bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[i].Freq;
- bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i].Freq;
- bw_params->clk_table.entries[i].voltage = clock_table->FClocks[i].Vol;
+ for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
+ bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[j].Freq;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[j].Freq;
+ bw_params->clk_table.entries[i].voltage = clock_table->FClocks[j].Vol;
+ bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol);
}
- bw_params->clk_table.num_entries = i;
bw_params->vram_type = asic_id->vram_type;
bw_params->num_channels = asic_id->vram_width / DDR4_DRAM_WIDTH;
@@ -486,7 +580,7 @@ void clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct d
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
- if (clock_table->FClocks[i].Freq == 0) {
+ if (i >= bw_params->clk_table.num_entries) {
bw_params->wm_table.entries[i].valid = false;
continue;
}
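
The index arithmetic in rn_clk_mgr_helper_populate_bw_params() is easy to misread: PP_SMU reports FCLK DPM levels highest-first with unused tail slots zeroed, and the bandwidth table wants them lowest-first. A self-contained userspace rehearsal of the same logic, with toy frequencies rather than real DPM data:

#include <stdio.h>

int main(void)
{
	int fclk[4] = { 1600, 1200, 800, 0 };	/* highest-first, 0 = unused */
	int entries[4], i, j = -1, num;

	for (i = 3; i >= 0; i--)	/* find the lowest non-empty DPM */
		if (fclk[i] != 0) {
			j = i;
			break;
		}

	num = j + 1;
	for (i = 0; i < num; i++, j--)	/* copy lowest-first */
		entries[i] = fclk[j];

	for (i = 0; i < num; i++)
		printf("entry %d: %d MHz\n", i, entries[i]);	/* 800, 1200, 1600 */

	return 0;
}
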
@@ -547,25 +641,24 @@ void rn_clk_mgr_construct(
clk_mgr->dentist_vco_freq_khz = 3600000;
rn_dump_clk_registers(&s, &clk_mgr->base, &log_info);
- clk_mgr->base.dprefclk_khz = s.dprefclk;
-
- if (clk_mgr->base.dprefclk_khz != 600000) {
- clk_mgr->base.dprefclk_khz = 600000;
- ASSERT(1); //TODO: Renoir follow up.
- }
+ /* Convert dprefclk units from MHz to KHz */
+ /* Value already divided by 10, some resolution lost */
+ clk_mgr->base.dprefclk_khz = s.dprefclk * 1000;
/* in case we don't get a value from the register, use default */
- if (clk_mgr->base.dprefclk_khz == 0)
+ if (clk_mgr->base.dprefclk_khz == 0) {
+ ASSERT(clk_mgr->base.dprefclk_khz == 600000);
clk_mgr->base.dprefclk_khz = 600000;
+ }
}
dce_clock_read_ss_info(clk_mgr);
clk_mgr->base.bw_params = &rn_bw_params;
- if (pp_smu) {
+ if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) {
pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
- clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id);
+ rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id);
}
/*
@@ -576,15 +669,16 @@ void rn_clk_mgr_construct(
if (!debug->disable_pplib_wm_range) {
struct pp_smu_wm_range_sets ranges = {0};
- build_watermark_ranges(clk_mgr->base.bw_params, &ranges);
+ rn_build_watermark_ranges(clk_mgr->base.bw_params, &ranges);
/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
if (pp_smu && pp_smu->rn_funcs.set_wm_ranges)
pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges);
}
- /* enable powerfeatures when displaycount goes to 0 */
- if (!debug->disable_48mhz_pwrdwn)
- rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr);
+ if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 0x00371500) {
+ /* enable powerfeatures when displaycount goes to 0 */
+ rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn);
+ }
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
index aadec06fde10..761bfda970a5 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
@@ -26,11 +26,20 @@
#ifndef __RN_CLK_MGR_H__
#define __RN_CLK_MGR_H__
+#include "clk_mgr.h"
+#include "dm_pp_smu.h"
+
struct rn_clk_registers {
uint32_t CLK1_CLK0_CURRENT_CNT; /* DPREFCLK */
};
-
+void rn_build_watermark_ranges(
+ struct clk_bw_params *bw_params,
+ struct pp_smu_wm_range_sets *ranges);
+void rn_clk_mgr_helper_populate_bw_params(
+ struct clk_bw_params *bw_params,
+ struct dpm_clocks *clock_table,
+ struct hw_asic_id *asic_id);
void rn_clk_mgr_construct(struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr,
struct pp_smu_funcs *pp_smu,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 50984c1811bb..5647fcf10717 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -33,7 +33,7 @@
#include "mp/mp_12_0_0_sh_mask.h"
#define REG(reg_name) \
- (MP1_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+ (MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
#define FN(reg_name, field) \
FD(reg_name##__##field)
@@ -84,16 +84,12 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis
int actual_dispclk_set_mhz = -1;
struct dc *core_dc = clk_mgr->base.ctx->dc;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
- uint32_t clk = requested_dispclk_khz / 1000;
-
- if (clk <= 100)
- clk = 101;
/* Unit of SMU msg parameter is Mhz */
actual_dispclk_set_mhz = rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDispclkFreq,
- clk);
+ requested_dispclk_khz / 1000);
if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
@@ -124,7 +120,7 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque
{
int actual_dcfclk_set_mhz = -1;
- if (clk_mgr->smu_ver < 0xFFFFFFFF)
+ if (clk_mgr->smu_ver < 0x370c00)
return actual_dcfclk_set_mhz;
actual_dcfclk_set_mhz = rn_vbios_smu_send_msg_with_param(
@@ -139,7 +135,7 @@ int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int
{
int actual_min_ds_dcfclk_mhz = -1;
- if (clk_mgr->smu_ver < 0xFFFFFFFF)
+ if (clk_mgr->smu_ver < 0x370c00)
return actual_min_ds_dcfclk_mhz;
actual_min_ds_dcfclk_mhz = rn_vbios_smu_send_msg_with_param(
@@ -162,33 +158,35 @@ int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_
{
int actual_dppclk_set_mhz = -1;
- uint32_t clk = requested_dpp_khz / 1000;
-
- if (clk <= 100)
- clk = 101;
-
actual_dppclk_set_mhz = rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDppclkFreq,
- clk);
+ requested_dpp_khz / 1000);
return actual_dppclk_set_mhz * 1000;
}
-void rn_vbios_smu_set_display_count(struct clk_mgr_internal *clk_mgr, int display_count)
+void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, enum dcn_pwr_state state)
{
+ int disp_count;
+
+ if (state == DCN_PWR_STATE_OPTIMIZED)
+ disp_count = 0;
+ else
+ disp_count = 1;
+
rn_vbios_smu_send_msg_with_param(
- clk_mgr,
- VBIOSSMC_MSG_SetDisplayCount,
- display_count);
+ clk_mgr,
+ VBIOSSMC_MSG_SetDisplayCount,
+ disp_count);
}
-void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr)
+void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
{
rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown,
- 0);
+ enable);
}
void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
index da3a49487c6d..ccc01879c9d4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
@@ -33,8 +33,8 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque
int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz);
void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz);
-void rn_vbios_smu_set_display_count(struct clk_mgr_internal *clk_mgr, int display_count);
-void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr);
+void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, enum dcn_pwr_state state);
+void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable);
void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr);
#endif /* DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 5d1adeda4d90..41b51f43a64b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -411,6 +411,27 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
return false;
}
+void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
+ enum dc_dynamic_expansion option)
+{
+ /* OPP FMT dyn expansion updates */
+ int i = 0;
+ struct pipe_ctx *pipe_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ pipe_ctx->stream_res.opp->dyn_expansion = option;
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ stream->timing.display_color_depth,
+ stream->signal);
+ }
+ }
+}
+
void dc_stream_set_dither_option(struct dc_stream_state *stream,
enum dc_dither_option option)
{
@@ -761,8 +782,13 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
#endif
- dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, dangling_context);
+#endif
}
current_ctx = dc->current_state;
@@ -785,9 +811,6 @@ struct dc *dc_create(const struct dc_init_data *init_params)
if (false == construct(dc, init_params))
goto construct_fail;
- /*TODO: separate HW and SW initialization*/
- dc->hwss.init_hw(dc);
-
full_pipe_count = dc->res_pool->pipe_count;
if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
full_pipe_count--;
@@ -820,9 +843,24 @@ alloc_fail:
return NULL;
}
+void dc_hardware_init(struct dc *dc)
+{
+ dc->hwss.init_hw(dc);
+}
+
void dc_init_callbacks(struct dc *dc,
const struct dc_callback_init *init_params)
{
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ dc->ctx->cp_psp = init_params->cp_psp;
+#endif
+}
+
+void dc_deinit_callbacks(struct dc *dc)
+{
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
+#endif
}
void dc_destroy(struct dc **dc)
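
With init_hw moved out of dc_create(), hardware bring-up becomes an explicit second step, which is what lets the DM wire up callbacks such as HDCP's cp_psp in between. A caller-side sketch of the new split (ordering illustrative, the exact amdgpu_dm sequence differs):

	struct dc *dc = dc_create(&init_params);	/* SW construction only */

	if (dc) {
		dc_init_callbacks(dc, &callback_init_params);
		dc_hardware_init(dc);			/* explicit HW programming */
	}
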
@@ -901,15 +939,11 @@ static void program_timing_sync(
/* set first pipe with plane as master */
for (j = 0; j < group_size; j++) {
- struct pipe_ctx *temp;
-
if (pipe_set[j]->plane_state) {
if (j == 0)
break;
- temp = pipe_set[0];
- pipe_set[0] = pipe_set[j];
- pipe_set[j] = temp;
+ swap(pipe_set[0], pipe_set[j]);
break;
}
}
@@ -966,40 +1000,87 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
struct dc_crtc_timing *crtc_timing)
{
struct timing_generator *tg;
+ struct stream_encoder *se = NULL;
+
+ struct dc_crtc_timing hw_crtc_timing = {0};
+
struct dc_link *link = sink->link;
- unsigned int enc_inst, tg_inst;
+ unsigned int i, enc_inst, tg_inst = 0;
+
+ // Seamless boot only supports single-stream DP and eDP so far
+ if (sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT &&
+ sink->sink_signal != SIGNAL_TYPE_EDP)
+ return false;
/* Check for enabled DIG to identify enabled display */
if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
return false;
- /* Check for which front end is used by this encoder.
- * Note the inst is 1 indexed, where 0 is undefined.
- * Note that DIG_FE can source from different OTG but our
- * current implementation always map 1-to-1, so this code makes
- * the same assumption and doesn't check OTG source.
- */
enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
- /* Instance should be within the range of the pool */
- if (enc_inst >= dc->res_pool->pipe_count)
+ if (enc_inst == ENGINE_ID_UNKNOWN)
return false;
- if (enc_inst >= dc->res_pool->stream_enc_count)
- return false;
+ for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
+ if (dc->res_pool->stream_enc[i]->id == enc_inst) {
+
+ se = dc->res_pool->stream_enc[i];
- tg_inst = dc->res_pool->stream_enc[enc_inst]->funcs->dig_source_otg(
- dc->res_pool->stream_enc[enc_inst]);
+ tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
+ dc->res_pool->stream_enc[i]);
+ break;
+ }
+ }
+
+ // tg_inst not found
+ if (i == dc->res_pool->stream_enc_count)
+ return false;
if (tg_inst >= dc->res_pool->timing_generator_count)
return false;
tg = dc->res_pool->timing_generators[tg_inst];
- if (!tg->funcs->is_matching_timing)
+ if (!tg->funcs->get_hw_timing)
+ return false;
+
+ if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
+ return false;
+
+ if (crtc_timing->h_total != hw_crtc_timing.h_total)
+ return false;
+
+ if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
+ return false;
+
+ if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
+ return false;
+
+ if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
+ return false;
+
+ if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
+ return false;
+
+ if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
+ return false;
+
+ if (crtc_timing->v_total != hw_crtc_timing.v_total)
+ return false;
+
+ if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
+ return false;
+
+ if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
+ return false;
+
+ if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
+ return false;
+
+ if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
return false;
- if (!tg->funcs->is_matching_timing(tg, crtc_timing))
+ if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
return false;
if (dc_is_dp_signal(link->connector_signal)) {
@@ -1012,6 +1093,20 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
return false;
+ if (!se->funcs->dp_get_pixel_format)
+ return false;
+
+ if (!se->funcs->dp_get_pixel_format(
+ se,
+ &hw_crtc_timing.pixel_encoding,
+ &hw_crtc_timing.display_color_depth))
+ return false;
+
+ if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
+ return false;
+
+ if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
+ return false;
}
return true;
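/*
 * [Editorial sketch, not part of this patch] The long run of comparisons
 * above validates that the timing already programmed in hardware matches
 * the requested timing before the seamless-boot optimization is allowed;
 * any single mismatch rejects it. Condensed into a standalone helper with
 * a hypothetical two-field timing struct (the real check also covers
 * borders, porches, sync widths and, for DP, the pixel encoding and color
 * depth read back from the stream encoder):
 */
#include <stdbool.h>

struct timing { unsigned int h_total, v_total; };

static bool timings_match(const struct timing *want, const struct timing *hw)
{
	return want->h_total == hw->h_total &&
	       want->v_total == hw->v_total;
}

int main(void)
{
	struct timing want = { 2200, 1125 }, hw = { 2200, 1125 };

	return timings_match(&want, &hw) ? 0 : 1; /* 0: seamless boot OK */
}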
@@ -1073,15 +1168,20 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
*/
- for (i = 0; i < context->stream_count; i++) {
- if (context->streams[i]->mode_changed)
- continue;
+ if (dc->hwss.apply_ctx_for_surface)
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->mode_changed)
+ continue;
- dc->hwss.apply_ctx_for_surface(
- dc, context->streams[i],
- context->stream_status[i].plane_count,
- context); /* use new pipe config in new context */
- }
+ dc->hwss.apply_ctx_for_surface(
+ dc, context->streams[i],
+ context->stream_status[i].plane_count,
+ context); /* use new pipe config in new context */
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, context);
+#endif
/* Program hardware */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1100,16 +1200,21 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
}
/* Program all planes within new context*/
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, context);
+#endif
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
if (!context->streams[i]->mode_changed)
continue;
- dc->hwss.apply_ctx_for_surface(
- dc, context->streams[i],
- context->stream_status[i].plane_count,
- context);
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(
+ dc, context->streams[i],
+ context->stream_status[i].plane_count,
+ context);
/*
* enable stereo
@@ -1492,20 +1597,15 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
union surface_update_flags *update_flags = &u->surface->update_flags;
- update_flags->raw = 0; // Reset all flags
-
if (u->flip_addr)
update_flags->bits.addr_update = 1;
- if (!is_surface_in_context(context, u->surface)) {
- update_flags->bits.new_plane = 1;
+ if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
+ update_flags->raw = 0xFFFFFFFF;
return UPDATE_TYPE_FULL;
}
- if (u->surface->force_full_update) {
- update_flags->bits.full_update = 1;
- return UPDATE_TYPE_FULL;
- }
+ update_flags->raw = 0; // Reset all flags
type = get_plane_info_update_type(u);
elevate_update_type(&overall_type, type);
@@ -1563,40 +1663,43 @@ static enum surface_update_type check_update_surfaces_for_stream(
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
if (stream_status == NULL || stream_status->plane_count != surface_count)
- return UPDATE_TYPE_FULL;
+ overall_type = UPDATE_TYPE_FULL;
/* some stream updates require passive update */
if (stream_update) {
- if ((stream_update->src.height != 0) &&
- (stream_update->src.width != 0))
- return UPDATE_TYPE_FULL;
+ union stream_update_flags *su_flags = &stream_update->stream->update_flags;
- if ((stream_update->dst.height != 0) &&
- (stream_update->dst.width != 0))
- return UPDATE_TYPE_FULL;
+ if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
+ (stream_update->dst.height != 0 && stream_update->dst.width != 0))
+ su_flags->bits.scaling = 1;
if (stream_update->out_transfer_func)
- return UPDATE_TYPE_FULL;
+ su_flags->bits.out_tf = 1;
if (stream_update->abm_level)
- return UPDATE_TYPE_FULL;
+ su_flags->bits.abm_level = 1;
if (stream_update->dpms_off)
- return UPDATE_TYPE_FULL;
+ su_flags->bits.dpms_off = 1;
+
+ if (stream_update->gamut_remap)
+ su_flags->bits.gamut_remap = 1;
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (stream_update->wb_update)
- return UPDATE_TYPE_FULL;
+ su_flags->bits.wb_update = 1;
#endif
+ if (su_flags->raw != 0)
+ overall_type = UPDATE_TYPE_FULL;
+
+ if (stream_update->output_csc_transform || stream_update->output_color_space)
+ su_flags->bits.out_csc = 1;
}
for (i = 0 ; i < surface_count; i++) {
enum surface_update_type type =
det_surface_update(dc, &updates[i]);
- if (type == UPDATE_TYPE_FULL)
- return type;
-
elevate_update_type(&overall_type, type);
}
@@ -1618,16 +1721,29 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
int i;
enum surface_update_type type;
+ if (stream_update)
+ stream_update->stream->update_flags.raw = 0;
for (i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0;
type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
- if (type == UPDATE_TYPE_FULL)
+ if (type == UPDATE_TYPE_FULL) {
+ if (stream_update)
+ stream_update->stream->update_flags.raw = 0xFFFFFFFF;
for (i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0xFFFFFFFF;
+ }
- if (type == UPDATE_TYPE_FAST && memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0)
- dc->optimized_required = true;
+ if (type == UPDATE_TYPE_FAST) {
+ // If there's an available clock comparator, we use that.
+ if (dc->clk_mgr->funcs->are_clock_states_equal) {
+ if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
+ dc->optimized_required = true;
+ // Otherwise fall back to a memcmp.
+ } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
+ dc->optimized_required = true;
+ }
+ }
return type;
}
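/*
 * [Editorial sketch, not part of this patch] The UPDATE_TYPE_FAST branch
 * above prefers a clk_mgr-provided comparator and only falls back to a raw
 * memcmp over the leading, "compared" portion of struct dc_clocks; the
 * offsetof() bound excludes the trailing fields that are documented as not
 * taking part in optimization decisions. The same shape in isolation, with
 * hypothetical types:
 */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct clocks {
	int dispclk_khz;
	int dppclk_khz;
	/* fields from here down are ignored by the fallback compare */
	bool prev_p_state_change_support;
};

static bool clocks_differ(const struct clocks *a, const struct clocks *b,
			  bool (*equal)(const struct clocks *,
					const struct clocks *))
{
	if (equal) /* a hardware-specific comparator wins when present */
		return !equal(a, b);
	return memcmp(a, b, /* prefix-only fallback */
		      offsetof(struct clocks, prev_p_state_change_support)) != 0;
}

int main(void)
{
	struct clocks a = { 600000, 400000, false };
	struct clocks b = a;

	b.dppclk_khz = 500000;
	return clocks_differ(&a, &b, NULL) ? 0 : 1; /* they differ: exits 0 */
}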
@@ -1868,6 +1984,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
struct dc_state *context)
{
int j;
+ bool should_program_abm;
// Stream updates
for (j = 0; j < dc->res_pool->pipe_count; j++) {
@@ -1948,14 +2065,21 @@ static void commit_planes_do_stream_update(struct dc *dc,
}
if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
- if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
- // if otg funcs defined check if blanked before programming
- if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
+ should_program_abm = true;
+
+ // if otg funcs defined check if blanked before programming
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked)
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
+ should_program_abm = false;
+
+ if (should_program_abm) {
+ if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
+ pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+ } else {
pipe_ctx->stream_res.abm->funcs->set_abm_level(
pipe_ctx->stream_res.abm, stream->abm_level);
- } else
- pipe_ctx->stream_res.abm->funcs->set_abm_level(
- pipe_ctx->stream_res.abm, stream->abm_level);
+ }
+ }
}
}
}
@@ -2000,7 +2124,13 @@ static void commit_planes_for_stream(struct dc *dc,
* In case of turning off screen, no need to program front end a second time.
* just return after program blank.
*/
- dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, context);
+#endif
+
return;
}
@@ -2060,10 +2190,15 @@ static void commit_planes_for_stream(struct dc *dc,
stream_status =
stream_get_status(context, pipe_ctx->stream);
- dc->hwss.apply_ctx_for_surface(
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(
dc, pipe_ctx->stream, stream_status->plane_count, context);
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST)
+ dc->hwss.program_front_end_for_ctx(dc, context);
+#endif
// Update Type FAST, Surface updates
if (update_type == UPDATE_TYPE_FAST) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index ca20b150afcc..935053664160 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -79,7 +79,6 @@ static void destruct(struct dc_link *link)
int i;
if (link->hpd_gpio != NULL) {
- dal_gpio_close(link->hpd_gpio);
dal_gpio_destroy_irq(&link->hpd_gpio);
link->hpd_gpio = NULL;
}
@@ -520,7 +519,7 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin
}
-static void read_edp_current_link_settings_on_detect(struct dc_link *link)
+static void read_current_link_settings_on_detect(struct dc_link *link)
{
union lane_count_set lane_count_set = { {0} };
uint8_t link_bw_set;
@@ -555,17 +554,23 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link)
&link_bw_set, sizeof(link_bw_set));
if (link_bw_set == 0) {
- /* If standard link rates are not being used,
- * Read DPCD 00115h to find the link rate set used
- */
- core_link_read_dpcd(link, DP_LINK_RATE_SET,
- &link_rate_set, sizeof(link_rate_set));
-
- if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
- link->cur_link_settings.link_rate =
- link->dpcd_caps.edp_supported_link_rates[link_rate_set];
- link->cur_link_settings.link_rate_set = link_rate_set;
- link->cur_link_settings.use_link_rate_set = true;
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ /* If standard link rates are not being used,
+ * Read DPCD 00115h to find the edp link rate set used
+ */
+ core_link_read_dpcd(link, DP_LINK_RATE_SET,
+ &link_rate_set, sizeof(link_rate_set));
+
+ // edp_supported_link_rates_count = 0 for DP
+ if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ link->cur_link_settings.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[link_rate_set];
+ link->cur_link_settings.link_rate_set = link_rate_set;
+ link->cur_link_settings.use_link_rate_set = true;
+ }
+ } else {
+ // Link Rate not found. Seamless boot may not work.
+ ASSERT(false);
}
} else {
link->cur_link_settings.link_rate = link_bw_set;
@@ -680,7 +685,7 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0);
}
-bool wait_for_alt_mode(struct dc_link *link)
+static bool wait_for_alt_mode(struct dc_link *link)
{
/**
@@ -738,7 +743,8 @@ bool wait_for_alt_mode(struct dc_link *link)
* This does not create remote sinks but will trigger DM
* to start MST detection if a branch is detected.
*/
-bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+static bool dc_link_detect_helper(struct dc_link *link,
+ enum dc_detect_reason reason)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct display_sink_capability sink_caps = { 0 };
@@ -753,6 +759,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
struct dpcd_caps prev_dpcd_caps;
bool same_dpcd = true;
enum dc_connection_type new_connection_type = dc_connection_none;
+ bool perform_dp_seamless_boot = false;
+
DC_LOGGER_INIT(link->ctx->logger);
if (dc_is_virtual_signal(link->connector_signal))
@@ -809,15 +817,15 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
}
case SIGNAL_TYPE_EDP: {
- read_edp_current_link_settings_on_detect(link);
+ read_current_link_settings_on_detect(link);
detect_edp_sink_caps(link);
- sink_caps.transaction_type =
- DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
sink_caps.signal = SIGNAL_TYPE_EDP;
break;
}
case SIGNAL_TYPE_DISPLAY_PORT: {
+
/* wa HPD high coming too early*/
if (link->link_enc->features.flags.bits.DP_IS_USB_C == 1) {
@@ -865,12 +873,24 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
* empty which leads to allocate_mst_payload() has "0"
* pbn_per_slot value leading to exception on dc_fixpt_div()
*/
- link->verified_link_cap = link->reported_link_cap;
+ dp_verify_mst_link_cap(link);
+
if (prev_sink != NULL)
dc_sink_release(prev_sink);
return false;
}
+ // For seamless boot, read the UEFI-programmed link settings and treat them as verified, skipping link cap verification.
+ if (reason == DETECT_REASON_BOOT &&
+ dc_ctx->dc->config.power_down_display_on_boot == false &&
+ link->link_status.link_active == true)
+ perform_dp_seamless_boot = true;
+
+ if (perform_dp_seamless_boot) {
+ read_current_link_settings_on_detect(link);
+ link->verified_link_cap = link->reported_link_cap;
+ }
+
break;
}
@@ -955,10 +975,11 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
* two link trainings
*/
- /* deal with non-mst cases */
- dp_verify_link_cap_with_retries(link,
- &link->reported_link_cap,
- LINK_TRAINING_MAX_VERIFY_RETRY);
+ // verify link cap for SST non-seamless boot
+ if (!perform_dp_seamless_boot)
+ dp_verify_link_cap_with_retries(link,
+ &link->reported_link_cap,
+ LINK_TRAINING_MAX_VERIFY_RETRY);
} else {
// If edid is the same, then discard new sink and revert back to original sink
if (same_edid) {
@@ -1047,6 +1068,23 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
dc_sink_release(prev_sink);
return true;
+
+}
+
+bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+{
+ const struct dc *dc = link->dc;
+ bool ret;
+
+ /* get out of low power state */
+ clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
+
+ ret = dc_link_detect_helper(link, reason);
+
+ /* Go back to power optimized state */
+ clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
+
+ return ret;
}
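/*
 * [Editorial sketch, not part of this patch] dc_link_detect() above is now
 * a thin wrapper that brackets the real detection work between "exit the
 * optimized power state" and "re-enter the optimized power state" calls,
 * so detection always runs at full power and the low-power state is
 * restored on every return path. The bracketing pattern in isolation, with
 * hypothetical stand-ins for the clk_mgr calls:
 */
#include <stdbool.h>
#include <stdio.h>

static void pwr_exit_optimized(void) { puts("full power"); }
static void pwr_optimize(void) { puts("low power"); }
static bool do_detect(void) { return true; }

static bool detect(void)
{
	bool ret;

	pwr_exit_optimized(); /* wake up before touching the link */
	ret = do_detect();
	pwr_optimize(); /* restore regardless of the result */
	return ret;
}

int main(void) { return detect() ? 0 : 1; }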
bool dc_link_get_hpd_state(struct dc_link *dc_link)
@@ -1492,7 +1530,7 @@ static enum dc_status enable_link_dp(
pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
- if (!apply_seamless_boot_optimization)
+ if (state->clk_mgr && !apply_seamless_boot_optimization)
state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
dp_enable_link_phy(
@@ -2169,8 +2207,10 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
dp_set_fec_ready(link, false);
}
#endif
- } else
- link->link_enc->funcs->disable_output(link->link_enc, signal);
+ } else {
+ if (signal != SIGNAL_TYPE_VIRTUAL)
+ link->link_enc->funcs->disable_output(link->link_enc, signal);
+ }
if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
/* MST disable link only when no stream use the link */
@@ -2217,7 +2257,7 @@ static bool dp_active_dongle_validate_timing(
break;
}
- if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
+ if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
dongle_caps->extendedCapValid == false)
return true;
@@ -2381,13 +2421,17 @@ bool dc_link_set_abm_disable(const struct dc_link *link)
return true;
}
-bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
+bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool wait)
{
struct dc *core_dc = link->ctx->dc;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
- if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_enabled)
- dmcu->funcs->set_psr_enable(dmcu, enable, wait);
+
+ if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
+ dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
+
+ link->psr_allow_active = allow_active;
return true;
}
@@ -2510,7 +2554,7 @@ static void update_mst_stream_alloc_table(
/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
* because stream_encoder is not exposed to dm
*/
-static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
+enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
@@ -2521,6 +2565,7 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
uint8_t i;
+ enum act_return_status ret;
DC_LOGGER_INIT(link->ctx->logger);
/* enable_link_dp_mst already check link->enabled_stream_count
@@ -2568,14 +2613,16 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
&link->mst_stream_alloc_table);
/* send down message */
- dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
stream->ctx,
stream);
- dm_helpers_dp_mst_send_payload_allocation(
- stream->ctx,
- stream,
- true);
+ if (ret != ACT_LINK_LOST) {
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ true);
+ }
/* slot X.Y for only current stream */
pbn_per_slot = get_pbn_per_slot(stream);
@@ -2667,6 +2714,24 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
return DC_OK;
}
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
+{
+ struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
+ if (cp_psp && cp_psp->funcs.update_stream_config) {
+ struct cp_psp_stream_config config;
+
+ memset(&config, 0, sizeof(config));
+
+ config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
+ config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->id;
+ config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst;
+ config.dpms_off = dpms_off;
+ config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
+ cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
+ }
+}
+#endif
void core_link_enable_stream(
struct dc_state *state,
@@ -2677,6 +2742,10 @@ void core_link_enable_stream(
enum dc_status status;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) &&
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ return;
+
if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
stream->link->link_enc->funcs->setup(
stream->link->link_enc,
@@ -2727,6 +2796,9 @@ void core_link_enable_stream(
/* Do not touch link on seamless boot optimization. */
if (pipe_ctx->stream->apply_seamless_boot_optimization) {
pipe_ctx->stream->dpms_off = false;
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, false);
+#endif
return;
}
@@ -2734,6 +2806,9 @@ void core_link_enable_stream(
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
apply_edp_fast_boot_optimization) {
pipe_ctx->stream->dpms_off = false;
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, false);
+#endif
return;
}
@@ -2786,13 +2861,16 @@ void core_link_enable_stream(
#endif
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
- allocate_mst_payload(pipe_ctx);
+ dc_link_allocate_mst_payload(pipe_ctx);
core_dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->link->cur_link_settings);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
enable_stream_features(pipe_ctx);
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, false);
+#endif
}
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
else { // if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment))
@@ -2810,6 +2888,14 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
+ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) &&
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ return;
+
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, true);
+#endif
+
core_dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 505967b48e14..9a56f110bbd1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -496,7 +496,7 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size)
{
- bool ret;
+ bool ret = false;
uint32_t payload_size =
dal_ddc_service_is_in_aux_transaction_mode(ddc) ?
DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE;
@@ -515,34 +515,32 @@ bool dal_ddc_service_query_ddc_data(
/*TODO: len of payload data for i2c and aux is uint8!!!!,
* but we want to read 256 over i2c!!!!*/
if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
- struct aux_payload write_payload = {
- .i2c_over_aux = true,
- .write = true,
- .mot = true,
- .address = address,
- .length = write_size,
- .data = write_buf,
- .reply = NULL,
- .defer_delay = get_defer_delay(ddc),
- };
-
- struct aux_payload read_payload = {
- .i2c_over_aux = true,
- .write = false,
- .mot = false,
- .address = address,
- .length = read_size,
- .data = read_buf,
- .reply = NULL,
- .defer_delay = get_defer_delay(ddc),
- };
-
- ret = dc_link_aux_transfer_with_retries(ddc, &write_payload);
+ struct aux_payload payload;
+ bool read_available = true;
+
+ payload.i2c_over_aux = true;
+ payload.address = address;
+ payload.reply = NULL;
+ payload.defer_delay = get_defer_delay(ddc);
+
+ if (write_size != 0) {
+ payload.write = true;
+ payload.mot = false;
+ payload.length = write_size;
+ payload.data = write_buf;
+
+ ret = dal_ddc_submit_aux_command(ddc, &payload);
+ read_available = ret;
+ }
- if (!ret)
- return false;
+ if (read_size != 0 && read_available) {
+ payload.write = false;
+ payload.mot = false;
+ payload.length = read_size;
+ payload.data = read_buf;
- ret = dc_link_aux_transfer_with_retries(ddc, &read_payload);
+ ret = dal_ddc_submit_aux_command(ddc, &payload);
+ }
} else {
struct i2c_payloads *payloads =
dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
@@ -573,6 +571,41 @@ bool dal_ddc_service_query_ddc_data(
return ret;
}
+bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
+ struct aux_payload *payload)
+{
+ uint8_t retrieved = 0;
+ bool ret = false;
+
+ if (!ddc)
+ return false;
+
+ if (!payload)
+ return false;
+
+ do {
+ struct aux_payload current_payload;
+ bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >
+ payload->length;
+
+ current_payload.address = payload->address;
+ current_payload.data = &payload->data[retrieved];
+ current_payload.defer_delay = payload->defer_delay;
+ current_payload.i2c_over_aux = payload->i2c_over_aux;
+ current_payload.length = is_end_of_payload ?
+ payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
+ current_payload.mot = !is_end_of_payload;
+ current_payload.reply = payload->reply;
+ current_payload.write = payload->write;
+
+ ret = dc_link_aux_transfer_with_retries(ddc, &current_payload);
+
+ retrieved += current_payload.length;
+ } while (retrieved < payload->length && ret);
+
+ return ret;
+}
+
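/*
 * [Editorial sketch, not part of this patch] dal_ddc_submit_aux_command()
 * above splits a long payload into DEFAULT_AUX_MAX_DATA_SIZE chunks and
 * keeps the MOT (middle-of-transaction) bit set on every chunk except the
 * last. A standalone reconstruction of just that chunking arithmetic; the
 * 16-byte chunk size is an assumption used for illustration:
 */
#include <stdbool.h>
#include <stdio.h>

#define CHUNK 16 /* stand-in for DEFAULT_AUX_MAX_DATA_SIZE */

int main(void)
{
	unsigned int length = 37, sent = 0;

	while (sent < length) {
		bool last = (sent + CHUNK) > length;
		unsigned int n = last ? length - sent : CHUNK;

		/* MOT stays set on every chunk except the last */
		printf("chunk at %2u: %2u bytes, mot=%d\n", sent, n, !last);
		sent += n;
	}
	return 0; /* prints 16-, 16- and 5-byte chunks with mot=1,1,0 */
}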
/* dc_link_aux_transfer_raw() - Attempt to transfer
* the given aux payload. This function does not perform
* retries or handle error states. The reply is returned
@@ -601,6 +634,20 @@ bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
return dce_aux_transfer_with_retries(ddc, payload);
}
+
+enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc,
+ uint32_t timeout)
+{
+ enum dc_status status = DC_OK;
+ struct ddc *ddc_pin = ddc->ddc_pin;
+
+ if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout == NULL)
+ return DC_ERROR_UNEXPECTED;
+ if (!ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout))
+ status = DC_ERROR_UNEXPECTED;
+ return status;
+}
+
/*test only function*/
void dal_ddc_service_set_ddc_pin(
struct ddc_service *ddc_service,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index f5742719b5d9..0f59b68aa4c2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1409,6 +1409,9 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
max_link_cap.link_rate = LINK_RATE_HIGH3;
+ if (link->link_enc->funcs->get_max_link_cap)
+ link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
+
/* Lower link settings based on sink's link cap */
if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
max_link_cap.lane_count =
@@ -1653,11 +1656,14 @@ bool dp_verify_link_cap_with_retries(
for (i = 0; i < attempts; i++) {
int fail_count = 0;
- enum dc_connection_type type;
+ enum dc_connection_type type = dc_connection_none;
memset(&link->verified_link_cap, 0,
sizeof(struct dc_link_settings));
- if (!dc_link_detect_sink(link, &type)) {
+ if (!dc_link_detect_sink(link, &type) || type == dc_connection_none) {
+ link->verified_link_cap.lane_count = LANE_COUNT_ONE;
+ link->verified_link_cap.link_rate = LINK_RATE_LOW;
+ link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED;
break;
} else if (dp_verify_link_cap(link,
&link->reported_link_cap,
@@ -1670,6 +1676,19 @@ bool dp_verify_link_cap_with_retries(
return success;
}
+bool dp_verify_mst_link_cap(
+ struct dc_link *link)
+{
+ struct dc_link_settings max_link_cap = {0};
+
+ max_link_cap = get_max_link_cap(link);
+ link->verified_link_cap = get_common_supported_link_settings(
+ link->reported_link_cap,
+ max_link_cap);
+
+ return true;
+}
+
static struct dc_link_settings get_common_supported_link_settings(
struct dc_link_settings link_setting_a,
struct dc_link_settings link_setting_b)
@@ -2057,11 +2076,11 @@ static bool allow_hpd_rx_irq(const struct dc_link *link)
return false;
}
-static bool handle_hpd_irq_psr_sink(const struct dc_link *link)
+static bool handle_hpd_irq_psr_sink(struct dc_link *link)
{
union dpcd_psr_configuration psr_configuration;
- if (!link->psr_enabled)
+ if (!link->psr_feature_enabled)
return false;
dm_helpers_dp_read_dpcd(
@@ -2100,8 +2119,8 @@ static bool handle_hpd_irq_psr_sink(const struct dc_link *link)
sizeof(psr_error_status.raw));
/* PSR error, disable and re-enable PSR */
- dc_link_set_psr_enable(link, false, true);
- dc_link_set_psr_enable(link, true, true);
+ dc_link_set_psr_allow_active(link, false, true);
+ dc_link_set_psr_allow_active(link, true, true);
return true;
} else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
@@ -2364,6 +2383,8 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
enum dc_status result;
bool status = false;
+ struct pipe_ctx *pipe_ctx;
+ int i;
if (out_link_loss)
*out_link_loss = false;
@@ -2440,6 +2461,15 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
&link->cur_link_settings,
true, LINK_TRAINING_ATTEMPTS);
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link &&
+ pipe_ctx->stream->dpms_off == false &&
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ dc_link_allocate_mst_payload(pipe_ctx);
+ }
+ }
+
status = false;
if (out_link_loss)
*out_link_loss = true;
@@ -2545,6 +2575,7 @@ static void get_active_converter_info(
uint8_t data, struct dc_link *link)
{
union dp_downstream_port_present ds_port = { .byte = data };
+ memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps));
/* decode converter info*/
if (!ds_port.fields.PORT_PRESENT) {
@@ -2691,6 +2722,7 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
* keep receiver powered all the time.*/
case DP_BRANCH_DEVICE_ID_0010FA:
case DP_BRANCH_DEVICE_ID_0080E1:
+ case DP_BRANCH_DEVICE_ID_00E04C:
link->wa_flags.dp_keep_receiver_powered = true;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 79438c4f1e20..a519dbc5ecb6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -277,7 +277,8 @@ void dp_retrain_link_dp_test(struct dc_link *link,
if (pipes[i].stream != NULL &&
!pipes[i].top_pipe && !pipes[i].prev_odm_pipe &&
pipes[i].stream->link != NULL &&
- pipes[i].stream_res.stream_enc != NULL) {
+ pipes[i].stream_res.stream_enc != NULL &&
+ pipes[i].stream->link == link) {
udelay(100);
pipes[i].stream_res.stream_enc->funcs->dp_blank(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 8f70295179ff..25da0c45d828 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1848,28 +1848,28 @@ static int acquire_resource_from_hw_enabled_state(
struct dc_stream_state *stream)
{
struct dc_link *link = stream->link;
- unsigned int inst, tg_inst;
+ unsigned int i, inst, tg_inst = 0;
/* Check for enabled DIG to identify enabled display */
if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
return -1;
- /* Check for which front end is used by this encoder.
- * Note the inst is 1 indexed, where 0 is undefined.
- * Note that DIG_FE can source from different OTG but our
- * current implementation always map 1-to-1, so this code makes
- * the same assumption and doesn't check OTG source.
- */
inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
- /* Instance should be within the range of the pool */
- if (inst >= pool->pipe_count)
- return -1;
+ if (inst == ENGINE_ID_UNKNOWN)
+ return -1;
- if (inst >= pool->stream_enc_count)
- return -1;
+ for (i = 0; i < pool->stream_enc_count; i++) {
+ if (pool->stream_enc[i]->id == inst) {
+ tg_inst = pool->stream_enc[i]->funcs->dig_source_otg(
+ pool->stream_enc[i]);
+ break;
+ }
+ }
- tg_inst = pool->stream_enc[inst]->funcs->dig_source_otg(pool->stream_enc[inst]);
+ // tg_inst not found
+ if (i == pool->stream_enc_count)
+ return -1;
if (tg_inst >= pool->timing_generator_count)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index bf1d7bb90e0f..bb09243758fe 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -423,10 +423,10 @@ bool dc_stream_add_writeback(struct dc *dc,
if (dwb->funcs->is_enabled(dwb)) {
/* writeback pipe already enabled, only need to update */
- dc->hwss.update_writeback(dc, stream_status, wb_info);
+ dc->hwss.update_writeback(dc, stream_status, wb_info, dc->current_state);
} else {
/* Enable writeback pipe from scratch*/
- dc->hwss.enable_writeback(dc, stream_status, wb_info);
+ dc->hwss.enable_writeback(dc, stream_status, wb_info, dc->current_state);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index a82352a87808..5967106826ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.48"
+#define DC_VER "3.2.54"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -111,19 +111,20 @@ struct dc_caps {
bool force_dp_tps4_for_cp2520;
bool disable_dp_clk_share;
bool psp_setup_panel_mode;
+ bool extended_aux_timeout_support;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
bool hw_3d_lut;
#endif
struct dc_plane_cap planes[MAX_PLANES];
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_bug_wa {
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool no_connect_phy_config;
bool dedcn20_305_wa;
+#endif
bool skip_clock_update;
};
-#endif
struct dc_dcc_surface_param {
struct dc_size surface_size;
@@ -220,6 +221,7 @@ struct dc_config {
bool power_down_display_on_boot;
bool edp_not_connected;
bool forced_clocks;
+ bool disable_extended_timeout_support; // Used to disable the extended timeout and the LTTPR feature as well
bool multi_mon_pp_mclk_switch;
};
@@ -245,6 +247,18 @@ enum wm_report_mode {
WM_REPORT_DEFAULT = 0,
WM_REPORT_OVERRIDE = 1,
};
+enum dtm_pstate {
+ dtm_level_p0 = 0, /* highest voltage */
+ dtm_level_p1,
+ dtm_level_p2,
+ dtm_level_p3,
+ dtm_level_p4, /* when active_display_count = 0 */
+};
+
+enum dcn_pwr_state {
+ DCN_PWR_STATE_OPTIMIZED = 0,
+ DCN_PWR_STATE_NORMAL = 1
+};
/*
* For any clocks that may differ per pipe
@@ -252,11 +266,7 @@ enum wm_report_mode {
*/
struct dc_clocks {
int dispclk_khz;
- int max_supported_dppclk_khz;
- int max_supported_dispclk_khz;
int dppclk_khz;
- int bw_dppclk_khz; /*a copy of dppclk_khz*/
- int bw_dispclk_khz;
int dcfclk_khz;
int socclk_khz;
int dcfclk_deep_sleep_khz;
@@ -264,12 +274,17 @@ struct dc_clocks {
int phyclk_khz;
int dramclk_khz;
bool p_state_change_support;
-
+ enum dcn_pwr_state pwr_state;
/*
* Elements below are not compared for the purposes of
* optimization required
*/
bool prev_p_state_change_support;
+ enum dtm_pstate dtm_level;
+ int max_supported_dppclk_khz;
+ int max_supported_dispclk_khz;
+ int bw_dppclk_khz; /*a copy of dppclk_khz*/
+ int bw_dispclk_khz;
};
struct dc_bw_validation_profile {
@@ -347,6 +362,7 @@ struct dc_debug_options {
bool disable_hubp_power_gate;
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool disable_dsc_power_gate;
+ int dsc_min_slice_height_override;
#endif
bool disable_pplib_wm_range;
enum wm_report_mode pplib_wm_report_mode;
@@ -462,9 +478,7 @@ struct dc {
struct dc_config config;
struct dc_debug_options debug;
struct dc_bounding_box_overrides bb_overrides;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_bug_wa work_arounds;
-#endif
struct dc_context *ctx;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
struct dc_phy_addr_space_config vm_pa_config;
@@ -553,10 +567,16 @@ struct dc_init_data {
};
struct dc_callback_init {
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct cp_psp cp_psp;
+#else
uint8_t reserved;
+#endif
};
struct dc *dc_create(const struct dc_init_data *init_params);
+void dc_hardware_init(struct dc *dc);
+
int dc_get_vmid_use_vector(struct dc *dc);
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
void dc_setup_vm_context(struct dc *dc, struct dc_virtual_addr_space_config *va_config, int vmid);
@@ -565,6 +585,7 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
#endif
void dc_init_callbacks(struct dc *dc,
const struct dc_callback_init *init_params);
+void dc_deinit_callbacks(struct dc *dc);
void dc_destroy(struct dc **dc);
/*******************************************************************************
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 6e42209f0e20..0ed2962add5a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -30,6 +30,7 @@
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 0x0a0 /* DP 1.4a SCR */
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1
#define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2
+#include "dc_types.h"
struct dc_dsc_bw_range {
uint32_t min_kbps; /* Bandwidth if min_target_bpp_x16 is used */
@@ -39,13 +40,21 @@ struct dc_dsc_bw_range {
uint32_t stream_kbps; /* Uncompressed stream bandwidth */
};
+struct display_stream_compressor {
+ const struct dsc_funcs *funcs;
+#ifndef AMD_EDID_UTILITY
+ struct dc_context *ctx;
+ int inst;
+#endif
+};
bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data,
const uint8_t *dpcd_dsc_ext_data,
struct dsc_dec_dpcd_caps *dsc_sink_caps);
bool dc_dsc_compute_bandwidth_range(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
+ const uint32_t dsc_min_slice_height_override,
const uint32_t min_kbps,
const uint32_t max_kbps,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
@@ -53,8 +62,9 @@ bool dc_dsc_compute_bandwidth_range(
struct dc_dsc_bw_range *range);
bool dc_dsc_compute_config(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
+ const uint32_t dsc_min_slice_height_override,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
struct dc_dsc_config *dsc_cfg);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 0b8700a8a94a..e0856bb8511f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -26,6 +26,8 @@
#ifndef DC_HW_TYPES_H
#define DC_HW_TYPES_H
+#ifndef AMD_EDID_UTILITY
+
#include "os_types.h"
#include "fixed31_32.h"
#include "signal_types.h"
@@ -124,20 +126,6 @@ struct plane_size {
int chroma_pitch;
struct rect surface_size;
struct rect chroma_size;
-
- union {
- struct {
- struct rect surface_size;
- int surface_pitch;
- } grph;
-
- struct {
- struct rect luma_size;
- int luma_pitch;
- struct rect chroma_size;
- int chroma_pitch;
- } video;
- };
};
struct dc_plane_dcc_param {
@@ -148,21 +136,6 @@ struct dc_plane_dcc_param {
int meta_pitch_c;
bool independent_64b_blks_c;
-
- union {
- struct {
- int meta_pitch;
- bool independent_64b_blks;
- } grph;
-
- struct {
- int meta_pitch_l;
- bool independent_64b_blks_l;
-
- int meta_pitch_c;
- bool independent_64b_blks_c;
- } video;
- };
};
/*Displayable pixel format in fb*/
@@ -605,6 +578,11 @@ enum dc_quantization_range {
QUANTIZATION_RANGE_LIMITED
};
+enum dc_dynamic_expansion {
+ DYN_EXPANSION_AUTO,
+ DYN_EXPANSION_DISABLE
+};
+
/* XFM */
/* used in struct dc_plane_state */
@@ -616,6 +594,8 @@ struct scaling_taps {
bool integer_scaling;
};
+#endif /* AMD_EDID_UTILITY */
+
enum dc_timing_standard {
DC_TIMING_STANDARD_UNDEFINED,
DC_TIMING_STANDARD_DMT,
@@ -737,30 +717,6 @@ enum dc_timing_3d_format {
TIMING_3D_FORMAT_MAX,
};
-enum trigger_delay {
- TRIGGER_DELAY_NEXT_PIXEL = 0,
- TRIGGER_DELAY_NEXT_LINE,
-};
-
-enum crtc_event {
- CRTC_EVENT_VSYNC_RISING = 0,
- CRTC_EVENT_VSYNC_FALLING
-};
-
-struct crtc_trigger_info {
- bool enabled;
- struct dc_stream_state *event_source;
- enum crtc_event event;
- enum trigger_delay delay;
-};
-
-struct dc_crtc_timing_adjust {
- uint32_t v_total_min;
- uint32_t v_total_max;
- uint32_t v_total_mid;
- uint32_t v_total_mid_frame_num;
-};
-
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct dc_dsc_config {
uint32_t num_slices_h; /* Number of DSC slices - horizontal */
@@ -804,6 +760,33 @@ struct dc_crtc_timing {
#endif
};
+#ifndef AMD_EDID_UTILITY
+
+enum trigger_delay {
+ TRIGGER_DELAY_NEXT_PIXEL = 0,
+ TRIGGER_DELAY_NEXT_LINE,
+};
+
+enum crtc_event {
+ CRTC_EVENT_VSYNC_RISING = 0,
+ CRTC_EVENT_VSYNC_FALLING
+};
+
+struct crtc_trigger_info {
+ bool enabled;
+ struct dc_stream_state *event_source;
+ enum crtc_event event;
+ enum trigger_delay delay;
+};
+
+struct dc_crtc_timing_adjust {
+ uint32_t v_total_min;
+ uint32_t v_total_max;
+ uint32_t v_total_mid;
+ uint32_t v_total_mid_frame_num;
+};
+
/* Passed on init */
enum vram_type {
VIDEO_MEMORY_TYPE_GDDR5 = 2,
@@ -874,5 +857,7 @@ struct tg_color {
uint16_t color_b_cb;
};
+#endif /* AMD_EDID_UTILITY */
+
#endif /* DC_HW_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 9ea75db3484e..f24fd19ed93d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -126,7 +126,8 @@ struct dc_link {
unsigned short chip_caps;
unsigned int dpcd_sink_count;
enum edp_revision edp_revision;
- bool psr_enabled;
+ bool psr_feature_enabled;
+ bool psr_allow_active;
/* MST record stream using this link */
struct link_flags {
@@ -158,6 +159,18 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
return dc->links[link_index];
}
+static inline struct dc_link *get_edp_link(const struct dc *dc)
+{
+ int i;
+
+ // report any eDP link, even an unconnected DDI
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP)
+ return dc->links[i];
+ }
+ return NULL;
+}
+
/* Set backlight level of an embedded panel (eDP, LVDS).
* backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer
* and 16 bit fractional, where 1.0 is max backlight value.
@@ -170,7 +183,7 @@ int dc_link_get_backlight_level(const struct dc_link *dc_link);
bool dc_link_set_abm_disable(const struct dc_link *dc_link);
-bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
+bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable, bool wait);
bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);
@@ -192,6 +205,7 @@ enum dc_detect_reason {
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
bool dc_link_get_hpd_state(struct dc_link *dc_link);
+enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
* Return:
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 0fa1c26bc20d..fdb6adc37857 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -113,6 +113,21 @@ struct periodic_interrupt_config {
int lines_offset;
};
+union stream_update_flags {
+ struct {
+ uint32_t scaling:1;
+ uint32_t out_tf:1;
+ uint32_t out_csc:1;
+ uint32_t abm_level:1;
+ uint32_t dpms_off:1;
+ uint32_t gamut_remap:1;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ uint32_t wb_update:1;
+#endif
+ } bits;
+
+ uint32_t raw;
+};
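/*
 * [Editorial sketch, not part of this patch] The bits/raw union above lets
 * the driver set individual dirty bits and then test or clear them all at
 * once, which is exactly how the su_flags->raw checks in dc.c earlier in
 * this patch work. A minimal standalone illustration of the pattern:
 */
#include <assert.h>
#include <stdint.h>

union flags {
	struct {
		uint32_t scaling:1;
		uint32_t dpms_off:1;
	} bits;
	uint32_t raw;
};

int main(void)
{
	union flags f = { .raw = 0 };

	f.bits.dpms_off = 1; /* one dirty bit ... */
	assert(f.raw != 0);  /* ... makes the whole word non-zero */
	f.raw = 0xFFFFFFFF;  /* force a full update: every bit set */
	assert(f.bits.scaling == 1);
	return 0;
}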
struct dc_stream_state {
// sink is deprecated, new code should not reference
@@ -214,9 +229,14 @@ struct dc_stream_state {
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool is_dsc_enabled;
#endif
+ union stream_update_flags update_flags;
};
+#define ABM_LEVEL_IMMEDIATE_DISABLE 0xFFFFFFFF
+
struct dc_stream_update {
+ struct dc_stream_state *stream;
+
struct rect src;
struct rect dst;
struct dc_transfer_func *out_transfer_func;
@@ -431,6 +451,9 @@ void dc_stream_set_static_screen_events(struct dc *dc,
int num_streams,
const struct dc_static_screen_events *events);
+void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
+ enum dc_dynamic_expansion option);
+
void dc_stream_set_dither_option(struct dc_stream_state *stream,
enum dc_dither_option option);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index b273735b6a3e..d9be8fc3889f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -25,6 +25,11 @@
#ifndef DC_TYPES_H_
#define DC_TYPES_H_
+#ifndef AMD_EDID_UTILITY
+/* The AMD EDID utility only needs a portion
+ * of this file; including the rest only
+ * causes additional issues.
+ */
#include "os_types.h"
#include "fixed31_32.h"
#include "irq_types.h"
@@ -33,6 +38,10 @@
#include "dal_types.h"
#include "grph_object_defs.h"
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#include "dm_cp_psp.h"
+#endif
+
/* forward declarations */
struct dc_plane_state;
struct dc_stream_state;
@@ -100,6 +109,9 @@ struct dc_context {
uint32_t dc_sink_id_count;
uint32_t dc_stream_id_count;
uint64_t fbc_gpu_addr;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct cp_psp cp_psp;
+#endif
};
@@ -159,6 +171,12 @@ enum dc_edid_status {
EDID_THE_SAME,
};
+enum act_return_status {
+ ACT_SUCCESS,
+ ACT_LINK_LOST,
+ ACT_FAILED
+};
+
/* audio capability from EDID*/
struct dc_cea_audio_mode {
uint8_t format_code; /* ucData[0] [6:3]*/
@@ -739,6 +757,9 @@ struct dc_clock_config {
uint32_t current_clock_khz;/*current clock in use*/
};
+#endif /* AMD_EDID_UTILITY */
+// The AMD EDID utility does not need any of the above structures
+
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* DSC DPCD capabilities */
union dsc_slice_caps1 {
@@ -810,4 +831,5 @@ struct dsc_dec_dpcd_caps {
uint32_t branch_max_line_width;
};
#endif
+
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 58bd131d5b48..d759fdca7fdb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -77,6 +77,9 @@ static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
/* notifyDMCUMsg */
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ 1, 80000);
+
return true;
}
@@ -489,9 +492,6 @@ void dce_abm_destroy(struct abm **abm)
{
struct dce_abm *abm_dce = TO_DCE_ABM(*abm);
- if (abm_dce->base.dmcu_is_running == true)
- abm_dce->base.funcs->set_abm_immediate_disable(*abm);
-
kfree(abm_dce);
*abm = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index c3f9f4185ce8..976bd4987a28 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -42,6 +42,10 @@
#include "reg_helper.h"
+#undef FN
+#define FN(reg_name, field_name) \
+ aux110->shift->field_name, aux110->mask->field_name
+
#define FROM_AUX_ENGINE(ptr) \
container_of((ptr), struct aux_engine_dce110, base)
@@ -55,6 +59,14 @@ enum {
AUX_TIMED_OUT_RETRY_COUNTER = 2,
AUX_DEFER_RETRY_COUNTER = 6
};
+
+#define TIME_OUT_INCREMENT 1016
+#define TIME_OUT_MULTIPLIER_8 8
+#define TIME_OUT_MULTIPLIER_16 16
+#define TIME_OUT_MULTIPLIER_32 32
+#define TIME_OUT_MULTIPLIER_64 64
+#define MAX_TIMEOUT_LENGTH 127
+
static void release_engine(
struct dce_aux *engine)
{
@@ -198,7 +210,7 @@ static void submit_channel_request(
REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
- 10, aux110->timeout_period/10);
+ 10, aux110->polling_timeout_period/10);
/* set the delay and the number of bytes to write */
@@ -327,7 +339,7 @@ static enum aux_channel_operation_result get_channel_status(
/* poll to make sure that SW_DONE is asserted */
REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
- 10, aux110->timeout_period/10);
+ 10, aux110->polling_timeout_period/10);
value = REG_READ(AUX_SW_STATUS);
/* in case HPD is LOW, exit AUX transaction */
@@ -414,20 +426,82 @@ void dce110_engine_destroy(struct dce_aux **engine)
*engine = NULL;
}
+
+static bool dce_aux_configure_timeout(struct ddc_service *ddc,
+ uint32_t timeout_in_us)
+{
+ uint32_t multiplier = 0;
+ uint32_t length = 0;
+ uint32_t timeout = 0;
+ struct ddc *ddc_pin = ddc->ddc_pin;
+ struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine);
+
+ /* 1-Update polling timeout period */
+ aux110->polling_timeout_period = timeout_in_us * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER;
+
+ /* 2-Update aux timeout period length and multiplier */
+ if (timeout_in_us <= TIME_OUT_INCREMENT) {
+ multiplier = 0;
+ length = timeout_in_us/TIME_OUT_MULTIPLIER_8;
+ if (timeout_in_us % TIME_OUT_MULTIPLIER_8 != 0)
+ length++;
+ timeout = length * TIME_OUT_MULTIPLIER_8;
+ } else if (timeout_in_us <= 2 * TIME_OUT_INCREMENT) {
+ multiplier = 1;
+ length = timeout_in_us/TIME_OUT_MULTIPLIER_16;
+ if (timeout_in_us % TIME_OUT_MULTIPLIER_16 != 0)
+ length++;
+ timeout = length * TIME_OUT_MULTIPLIER_16;
+ } else if (timeout_in_us <= 4 * TIME_OUT_INCREMENT) {
+ multiplier = 2;
+ length = timeout_in_us/TIME_OUT_MULTIPLIER_32;
+ if (timeout_in_us % TIME_OUT_MULTIPLIER_32 != 0)
+ length++;
+ timeout = length * TIME_OUT_MULTIPLIER_32;
+ } else if (timeout_in_us > 4 * TIME_OUT_INCREMENT) {
+ multiplier = 3;
+ length = timeout_in_us/TIME_OUT_MULTIPLIER_64;
+ if (timeout_in_us % TIME_OUT_MULTIPLIER_64 != 0)
+ length++;
+ timeout = length * TIME_OUT_MULTIPLIER_64;
+ }
+
+ length = (length < MAX_TIMEOUT_LENGTH) ? length : MAX_TIMEOUT_LENGTH;
+
+ REG_UPDATE_SEQ_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, length, AUX_RX_TIMEOUT_LEN_MUL, multiplier);
+
+ return true;
+}
+
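/*
 * [Editorial sketch, not part of this patch] dce_aux_configure_timeout()
 * above rounds the requested timeout up to a whole number of 8/16/32/64 us
 * units, picks the matching multiplier, and caps the length field at
 * MAX_TIMEOUT_LENGTH (127). A standalone reconstruction of just that
 * bucket arithmetic, assuming the register encodes (length, multiplier)
 * exactly as the constants imply:
 */
#include <stdint.h>
#include <stdio.h>

static void aux_timeout_encoding(uint32_t us)
{
	static const uint32_t unit[] = { 8, 16, 32, 64 };
	uint32_t mult = us <= 1016 ? 0 : us <= 2032 ? 1 : us <= 4064 ? 2 : 3;
	uint32_t len = (us + unit[mult] - 1) / unit[mult]; /* round up */

	if (len > 127)
		len = 127;
	printf("%u us -> mult=%u len=%u (effective %u us)\n",
	       us, mult, len, len * unit[mult]);
}

int main(void)
{
	aux_timeout_encoding(552);  /* mult=0 len=69, effective 552 us */
	aux_timeout_encoding(3200); /* mult=2 len=100, effective 3200 us */
	return 0;
}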
+static struct dce_aux_funcs aux_functions = {
+ .configure_timeout = NULL,
+ .destroy = NULL,
+};
+
struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
struct dc_context *ctx,
uint32_t inst,
uint32_t timeout_period,
- const struct dce110_aux_registers *regs)
+ const struct dce110_aux_registers *regs,
+ const struct dce110_aux_registers_mask *mask,
+ const struct dce110_aux_registers_shift *shift,
+ bool is_ext_aux_timeout_configurable)
{
aux_engine110->base.ddc = NULL;
aux_engine110->base.ctx = ctx;
aux_engine110->base.delay = 0;
aux_engine110->base.max_defer_write_retry = 0;
aux_engine110->base.inst = inst;
- aux_engine110->timeout_period = timeout_period;
+ aux_engine110->polling_timeout_period = timeout_period;
aux_engine110->regs = regs;
+ aux_engine110->mask = mask;
+ aux_engine110->shift = shift;
+ aux_engine110->base.funcs = &aux_functions;
+ if (is_ext_aux_timeout_configurable)
+ aux_engine110->base.funcs->configure_timeout = &dce_aux_configure_timeout;
+
return &aux_engine110->base;
}
@@ -475,7 +549,7 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
aux_req.action = i2caux_action_from_payload(payload);
aux_req.address = payload->address;
- aux_req.delay = payload->defer_delay * 10;
+ aux_req.delay = 0;
aux_req.length = payload->length;
aux_req.data = payload->data;
@@ -544,8 +618,15 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
case AUX_TRANSACTION_REPLY_AUX_DEFER:
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
- if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES)
+ if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) {
goto fail;
+ } else {
+ if ((*payload->reply == AUX_TRANSACTION_REPLY_AUX_DEFER) ||
+ (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) {
+ if (payload->defer_delay > 0)
+ msleep(payload->defer_delay);
+ }
+ }
break;
case AUX_TRANSACTION_REPLY_I2C_DEFER:
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index ed7fec8fe253..b4b2c79a8073 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -29,6 +29,7 @@
#include "i2caux_interface.h"
#include "inc/hw/aux_engine.h"
+
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#define AUX_COMMON_REG_LIST0(id)\
SRI(AUX_CONTROL, DP_AUX, id), \
@@ -36,6 +37,7 @@
SRI(AUX_SW_DATA, DP_AUX, id), \
SRI(AUX_SW_CONTROL, DP_AUX, id), \
SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
+ SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \
SRI(AUX_SW_STATUS, DP_AUX, id)
#endif
@@ -55,6 +57,7 @@ struct dce110_aux_registers {
uint32_t AUX_SW_DATA;
uint32_t AUX_SW_CONTROL;
uint32_t AUX_INTERRUPT_CONTROL;
+ uint32_t AUX_DPHY_RX_CONTROL1;
uint32_t AUX_SW_STATUS;
uint32_t AUXN_IMPCAL;
uint32_t AUXP_IMPCAL;
@@ -62,6 +65,156 @@ struct dce110_aux_registers {
uint32_t AUX_RESET_MASK;
};
+#define DCE_AUX_REG_FIELD_LIST(type)\
+ type AUX_EN;\
+ type AUX_RESET;\
+ type AUX_RESET_DONE;\
+ type AUX_REG_RW_CNTL_STATUS;\
+ type AUX_SW_USE_AUX_REG_REQ;\
+ type AUX_SW_DONE_USING_AUX_REG;\
+ type AUX_SW_AUTOINCREMENT_DISABLE;\
+ type AUX_SW_DATA_RW;\
+ type AUX_SW_INDEX;\
+ type AUX_SW_GO;\
+ type AUX_SW_DATA;\
+ type AUX_SW_REPLY_BYTE_COUNT;\
+ type AUX_SW_DONE;\
+ type AUX_SW_DONE_ACK;\
+ type AUXN_IMPCAL_ENABLE;\
+ type AUXP_IMPCAL_ENABLE;\
+ type AUXN_IMPCAL_OVERRIDE_ENABLE;\
+ type AUXP_IMPCAL_OVERRIDE_ENABLE;\
+ type AUX_RX_TIMEOUT_LEN;\
+ type AUX_RX_TIMEOUT_LEN_MUL;\
+ type AUXN_CALOUT_ERROR_AK;\
+ type AUXP_CALOUT_ERROR_AK;\
+ type AUX_SW_START_DELAY;\
+ type AUX_SW_WR_BYTES
+
+#define DCE10_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)
+
+#define DCE_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(AUX_CONTROL, AUX_RESET, mask_sh),\
+ AUX_SF(AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)
+
+#define DCE12_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)
+
+/* DCN10 MASK */
+#define DCN10_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)
+
+/* for all other DCN generations */
+#define DCN_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN_MUL, mask_sh)
+
+#define AUX_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
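For context, AUX_SF is a token-pasting helper: each entry in the lists above expands to a designated initializer whose value is an auto-generated register-header constant. A sketch of one expansion (the constant names follow the usual generated-header pattern and are assumed here, not quoted from those headers):

    /* AUX_SF(AUX_CONTROL, AUX_EN, _MASK) expands to: */
    .AUX_EN = AUX_CONTROL__AUX_EN_MASK,
    /* AUX_SF(AUX_CONTROL, AUX_EN, __SHIFT) expands to: */
    .AUX_EN = AUX_CONTROL__AUX_EN__SHIFT,

This is why the same list macro can populate both the _MASK and __SHIFT tables instantiated in the resource files further down.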
enum { /* This is the timeout as defined in DP 1.2a,
* 2.3.4 "Detailed uPacket TX AUX CH State Description".
*/
@@ -97,20 +250,34 @@ struct dce_aux {
uint32_t max_defer_write_retry;
bool acquire_reset;
+ struct dce_aux_funcs *funcs;
+};
+
+struct dce110_aux_registers_mask {
+ DCE_AUX_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce110_aux_registers_shift {
+ DCE_AUX_REG_FIELD_LIST(uint8_t);
};
+
struct aux_engine_dce110 {
struct dce_aux base;
const struct dce110_aux_registers *regs;
+ const struct dce110_aux_registers_mask *mask;
+ const struct dce110_aux_registers_shift *shift;
struct {
uint32_t aux_control;
uint32_t aux_arb_control;
uint32_t aux_sw_data;
uint32_t aux_sw_control;
uint32_t aux_interrupt_control;
+ uint32_t aux_dphy_rx_control1;
+ uint32_t aux_dphy_rx_control0;
uint32_t aux_sw_status;
} addr;
- uint32_t timeout_period;
+ uint32_t polling_timeout_period;
};
struct aux_engine_dce110_init_data {
@@ -120,12 +287,15 @@ struct aux_engine_dce110_init_data {
const struct dce110_aux_registers *regs;
};
-struct dce_aux *dce110_aux_engine_construct(
- struct aux_engine_dce110 *aux_engine110,
+struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
struct dc_context *ctx,
uint32_t inst,
uint32_t timeout_period,
- const struct dce110_aux_registers *regs);
+ const struct dce110_aux_registers *regs,
+ const struct dce110_aux_registers_mask *mask,
+ const struct dce110_aux_registers_shift *shift,
+ bool is_ext_aux_timeout_configurable);
void dce110_engine_destroy(struct dce_aux **engine);
@@ -139,4 +309,13 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *cmd);
+
+struct dce_aux_funcs {
+ bool (*configure_timeout)(struct ddc_service *ddc, uint32_t timeout);
+ void (*destroy)(struct aux_engine **ptr);
+};
+
#endif
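A minimal sketch of how the new funcs table is meant to be dispatched; the wrapper name and the fallback are assumptions, not code from this patch:

    /* Hypothetical wrapper: dispatch through the funcs table added above. */
    static bool aux_configure_timeout_example(struct ddc_service *ddc,
                                              struct dce_aux *engine,
                                              uint32_t timeout_in_us)
    {
            if (engine->funcs && engine->funcs->configure_timeout)
                    return engine->funcs->configure_timeout(ddc, timeout_in_us);
            return false; /* engine without a configurable timeout */
    }

An engine created with is_ext_aux_timeout_configurable == false can simply leave configure_timeout NULL, which the check above tolerates.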
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index 0b86cee4876f..ba995d3f2318 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -907,9 +907,6 @@ void dce_dmcu_destroy(struct dmcu **dmcu)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(*dmcu);
- if (dmcu_dce->base.dmcu_state == DMCU_RUNNING)
- dmcu_dce->base.funcs->set_psr_enable(*dmcu, false, true);
-
kfree(dmcu_dce);
*dmcu = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index ac04d77058f0..32d145a0d6fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -679,6 +679,7 @@ struct dce_hwseq_registers {
HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN17_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN18_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
+ HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index 31b698bf9cfc..8aa937f496c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -606,11 +606,11 @@ static void dce_mi_allocate_dmif(
}
if (dce_mi->wa.single_head_rdreq_dmif_limit) {
- uint32_t eanble = (total_stream_num > 1) ? 0 :
+ uint32_t enable = (total_stream_num > 1) ? 0 :
dce_mi->wa.single_head_rdreq_dmif_limit;
REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT,
- ENABLE, eanble);
+ ENABLE, enable);
}
}
@@ -636,11 +636,11 @@ static void dce_mi_free_dmif(
10, 3500);
if (dce_mi->wa.single_head_rdreq_dmif_limit) {
- uint32_t eanble = (total_stream_num > 1) ? 0 :
+ uint32_t enable = (total_stream_num > 1) ? 0 :
dce_mi->wa.single_head_rdreq_dmif_limit;
REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT,
- ENABLE, eanble);
+ ENABLE, enable);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 76d54885374a..b5d6dff29c45 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -506,6 +506,14 @@ static const struct dce_mem_input_mask mi_masks = {
.ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE10_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE10_AUX_MASK_SH_LIST(_MASK)
+};
+
static struct mem_input *dce100_mem_input_create(
struct dc_context *ctx,
uint32_t inst)
@@ -611,7 +619,10 @@ struct dce_aux *dce100_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -997,6 +1008,8 @@ static bool construct(
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
dc->caps.disable_dp_clk_share = true;
+ dc->caps.extended_aux_timeout_support = false;
+
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.timing_generators[i] =
dce100_timing_generator_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 01a924bf477a..9150e546dcf2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -944,7 +944,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
{
/* notify the audio driver of the monitor's audio modes */
struct dc *core_dc;
- struct pp_smu_funcs *pp_smu = NULL;
struct clk_mgr *clk_mgr;
unsigned int i, num_audio = 1;
@@ -957,9 +956,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true)
return;
- if (core_dc->res_pool->pp_smu)
- pp_smu = core_dc->res_pool->pp_smu;
-
if (pipe_ctx->stream_res.audio) {
for (i = 0; i < MAX_PIPES; i++) {
/* current_state not updated yet */
@@ -984,7 +980,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
{
struct dc *dc;
- struct pp_smu_funcs *pp_smu = NULL;
struct clk_mgr *clk_mgr;
if (!pipe_ctx || !pipe_ctx->stream)
@@ -1001,9 +996,6 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.audio) {
pipe_ctx->stream_res.audio->enabled = false;
- if (dc->res_pool->pp_smu)
- pp_smu = dc->res_pool->pp_smu;
-
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
pipe_ctx->stream_res.stream_enc);
@@ -1169,8 +1161,9 @@ static void build_audio_output(
}
}
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
- pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ if (state->clk_mgr &&
+ (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) {
audio_output->pll_info.dp_dto_source_clock_in_khz =
state->clk_mgr->funcs->get_dp_ref_clk_frequency(
state->clk_mgr);
@@ -1418,7 +1411,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
- pipe_ctx->stream->link->psr_enabled = false;
+ pipe_ctx->stream->link->psr_feature_enabled = false;
return DC_OK;
}
@@ -1529,18 +1522,6 @@ static struct dc_stream_state *get_edp_stream(struct dc_state *context)
return NULL;
}
-static struct dc_link *get_edp_link(struct dc *dc)
-{
- int i;
-
- // report any eDP links, even unconnected DDI's
- for (i = 0; i < dc->link_count; i++) {
- if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP)
- return dc->links[i];
- }
- return NULL;
-}
-
static struct dc_link *get_edp_link_with_sink(
struct dc *dc,
struct dc_state *context)
@@ -1834,7 +1815,7 @@ static bool should_enable_fbc(struct dc *dc,
return false;
/* PSR should not be enabled */
- if (pipe_ctx->stream->link->psr_enabled)
+ if (pipe_ctx->stream->link->psr_feature_enabled)
return false;
/* Nothing to compress */
@@ -2464,7 +2445,6 @@ static void dce110_program_front_end_for_pipe(
struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct mem_input *mi = pipe_ctx->plane_res.mi;
- struct pipe_ctx *old_pipe = NULL;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct xfm_grph_csc_adjustment adjust;
struct out_csc_color_matrix tbl_entry;
@@ -2472,9 +2452,6 @@ static void dce110_program_front_end_for_pipe(
DC_LOGGER_INIT();
memset(&tbl_entry, 0, sizeof(tbl_entry));
- if (dc->current_state)
- old_pipe = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
-
memset(&adjust, 0, sizeof(adjust));
adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 89620adc81d8..c651a38e34a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -275,6 +275,14 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE110(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE_AUX_MASK_SH_LIST(_MASK)
+};
+
#define opp_regs(id)\
[id] = {\
OPP_DCE_110_REG_LIST(id),\
@@ -657,7 +665,10 @@ struct dce_aux *dce110_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -1293,6 +1304,7 @@ static bool construct(
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
dc->caps.is_apu = true;
+ dc->caps.extended_aux_timeout_support = false;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 21a657e79306..876ed0607718 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -172,6 +172,14 @@ static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCE110(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE_AUX_MASK_SH_LIST(_MASK)
+};
+
#define ipp_regs(id)\
[id] = {\
IPP_DCE110_REG_LIST_DCE_BASE(id)\
@@ -630,7 +638,10 @@ struct dce_aux *dce112_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -1163,7 +1174,7 @@ static bool construct(
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
-
+ dc->caps.extended_aux_timeout_support = false;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 7c52f7f9196c..c30faa05fd27 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -293,6 +293,14 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE120(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE12_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE12_AUX_MASK_SH_LIST(_MASK)
+};
+
#define opp_regs(id)\
[id] = {\
OPP_DCE_120_REG_LIST(id),\
@@ -404,7 +412,10 @@ struct dce_aux *dce120_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -1006,7 +1017,7 @@ static bool construct(
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
dc->caps.psp_setup_panel_mode = true;
-
+ dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
/*************************************************
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 643ccb0ade00..59ebd657462d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -288,6 +288,14 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE10_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE10_AUX_MASK_SH_LIST(_MASK)
+};
+
#define aux_engine_regs(id)\
[id] = {\
AUX_COMMON_REG_LIST(id), \
@@ -491,7 +499,10 @@ struct dce_aux *dce80_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -895,6 +906,7 @@ static bool dce80_construct(
dc->caps.i2c_speed_in_khz = 40;
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
+ dc->caps.extended_aux_timeout_support = false;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index d8b2da18db39..997e9582edc7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -129,7 +129,7 @@ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)
#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))
-static bool dpp_get_optimal_number_of_taps(
+bool dpp1_get_optimal_number_of_taps(
struct dpp *dpp,
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
@@ -521,7 +521,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_read_state = dpp_read_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
- .dpp_get_optimal_number_of_taps = dpp_get_optimal_number_of_taps,
+ .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps,
.dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
.dpp_set_csc_adjustment = dpp1_cm_set_output_csc_adjustment,
.dpp_set_csc_default = dpp1_cm_set_output_csc_default,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index e2c613611ac9..1d4a7d640334 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -1504,6 +1504,11 @@ void dpp1_set_hdr_multiplier(
struct dpp *dpp_base,
uint32_t multiplier);
+bool dpp1_get_optimal_number_of_taps(
+ struct dpp *dpp,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps);
+
void dpp1_construct(struct dcn10_dpp *dpp1,
struct dc_context *ctx,
uint32_t inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 001db49e4bb2..14d1be6c66e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -841,6 +841,14 @@ void min_set_viewport(
REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0,
PRI_VIEWPORT_X_START_C, viewport_c->x,
PRI_VIEWPORT_Y_START_C, viewport_c->y);
+
+ REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0,
+ SEC_VIEWPORT_WIDTH_C, viewport_c->width,
+ SEC_VIEWPORT_HEIGHT_C, viewport_c->height);
+
+ REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0,
+ SEC_VIEWPORT_X_START_C, viewport_c->x,
+ SEC_VIEWPORT_Y_START_C, viewport_c->y);
}
void hubp1_read_state_common(struct hubp *hubp)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index cb20d10288c0..ae70d9c0aa1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -47,6 +47,8 @@
SRI(DCSURF_SEC_VIEWPORT_START, HUBP, id), \
SRI(DCSURF_PRI_VIEWPORT_DIMENSION_C, HUBP, id), \
SRI(DCSURF_PRI_VIEWPORT_START_C, HUBP, id), \
+ SRI(DCSURF_SEC_VIEWPORT_DIMENSION_C, HUBP, id), \
+ SRI(DCSURF_SEC_VIEWPORT_START_C, HUBP, id), \
SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_SURFACE_ADDRESS, HUBPREQ, id),\
SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
@@ -57,8 +59,12 @@
SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_C, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_C, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\
SRI(DCSURF_SURFACE_INUSE, HUBPREQ, id),\
SRI(DCSURF_SURFACE_INUSE_HIGH, HUBPREQ, id),\
SRI(DCSURF_SURFACE_INUSE_C, HUBPREQ, id),\
@@ -150,6 +156,8 @@
uint32_t DCSURF_SEC_VIEWPORT_START; \
uint32_t DCSURF_PRI_VIEWPORT_DIMENSION_C; \
uint32_t DCSURF_PRI_VIEWPORT_START_C; \
+ uint32_t DCSURF_SEC_VIEWPORT_DIMENSION_C; \
+ uint32_t DCSURF_SEC_VIEWPORT_START_C; \
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; \
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS; \
uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH; \
@@ -160,8 +168,12 @@
uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS; \
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; \
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; \
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C; \
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_C; \
uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C; \
uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C; \
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C; \
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_C; \
uint32_t DCSURF_SURFACE_INUSE; \
uint32_t DCSURF_SURFACE_INUSE_HIGH; \
uint32_t DCSURF_SURFACE_INUSE_C; \
@@ -279,6 +291,10 @@
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_HEIGHT_C, mask_sh),\
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_X_START_C, mask_sh),\
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_Y_START_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_WIDTH_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_HEIGHT_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_X_START_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_Y_START_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS, PRIMARY_SURFACE_ADDRESS, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\
@@ -289,8 +305,12 @@
HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS, SECONDARY_META_SURFACE_ADDRESS, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, PRIMARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C, PRIMARY_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, SECONDARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C, SECONDARY_SURFACE_ADDRESS_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, PRIMARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, PRIMARY_META_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, SECONDARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, SECONDARY_META_SURFACE_ADDRESS_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE, SURFACE_INUSE_ADDRESS, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH, SURFACE_INUSE_ADDRESS_HIGH, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_C, SURFACE_INUSE_ADDRESS_C, mask_sh),\
@@ -469,6 +489,10 @@
type PRI_VIEWPORT_HEIGHT_C; \
type PRI_VIEWPORT_X_START_C; \
type PRI_VIEWPORT_Y_START_C; \
+ type SEC_VIEWPORT_WIDTH_C; \
+ type SEC_VIEWPORT_HEIGHT_C; \
+ type SEC_VIEWPORT_X_START_C; \
+ type SEC_VIEWPORT_Y_START_C; \
type PRIMARY_SURFACE_ADDRESS_HIGH;\
type PRIMARY_SURFACE_ADDRESS;\
type SECONDARY_SURFACE_ADDRESS_HIGH;\
@@ -479,8 +503,12 @@
type SECONDARY_META_SURFACE_ADDRESS;\
type PRIMARY_SURFACE_ADDRESS_HIGH_C;\
type PRIMARY_SURFACE_ADDRESS_C;\
+ type SECONDARY_SURFACE_ADDRESS_HIGH_C;\
+ type SECONDARY_SURFACE_ADDRESS_C;\
type PRIMARY_META_SURFACE_ADDRESS_HIGH_C;\
type PRIMARY_META_SURFACE_ADDRESS_C;\
+ type SECONDARY_META_SURFACE_ADDRESS_HIGH_C;\
+ type SECONDARY_META_SURFACE_ADDRESS_C;\
type SURFACE_INUSE_ADDRESS;\
type SURFACE_INUSE_ADDRESS_HIGH;\
type SURFACE_INUSE_ADDRESS_C;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 60123db7ba02..df1be8ada66d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -670,6 +670,10 @@ static void dcn10_bios_golden_init(struct dc *dc)
int i;
bool allow_self_refresh_force_enable = true;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ if (dc->hwss.s0i3_golden_init_wa && dc->hwss.s0i3_golden_init_wa(dc))
+ return;
+#endif
if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
allow_self_refresh_force_enable =
dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
@@ -1452,15 +1456,15 @@ static void log_tf(struct dc_context *ctx,
DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
for (i = 0; i < hw_points_num; i++) {
- DC_LOG_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value);
- DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value);
- DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value);
+ DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
+ DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
+ DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
}
for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
- DC_LOG_ALL_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value);
- DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value);
- DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value);
+ DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
+ DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
+ DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
}
}
@@ -2304,8 +2308,7 @@ void update_dchubp_dpp(
dc->res_pool->dccg->funcs->update_dpp_dto(
dc->res_pool->dccg,
dpp->inst,
- pipe_ctx->plane_res.bw.dppclk_khz,
- false);
+ pipe_ctx->plane_res.bw.dppclk_khz);
else
dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
dc->clk_mgr->clks.dispclk_khz / 2 :
@@ -2512,8 +2515,10 @@ static void program_all_pipe_in_tree(
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+ if (dc->hwss.setup_vupdate_interrupt)
+ dc->hwss.setup_vupdate_interrupt(pipe_ctx);
+ dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
}
if (pipe_ctx->plane_state != NULL)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index 8bf5f0f2301d..88fcc395adf5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -113,6 +113,20 @@ struct dcn10_link_enc_registers {
uint32_t DIG_LANE_ENABLE;
/* UNIPHY */
uint32_t CHANNEL_XBAR_CNTL;
+ /* DPCS */
+ uint32_t RDPCSTX_PHY_CNTL3;
+ uint32_t RDPCSTX_PHY_CNTL4;
+ uint32_t RDPCSTX_PHY_CNTL5;
+ uint32_t RDPCSTX_PHY_CNTL6;
+ uint32_t RDPCSTX_PHY_CNTL7;
+ uint32_t RDPCSTX_PHY_CNTL8;
+ uint32_t RDPCSTX_PHY_CNTL9;
+ uint32_t RDPCSTX_PHY_CNTL10;
+ uint32_t RDPCSTX_PHY_CNTL11;
+ uint32_t RDPCSTX_PHY_CNTL12;
+ uint32_t RDPCSTX_PHY_CNTL13;
+ uint32_t RDPCSTX_PHY_CNTL14;
+ uint32_t RDPCSTX_PHY_CNTL15;
/* indirect registers */
uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2;
uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3;
@@ -250,6 +264,10 @@ struct dcn10_link_enc_registers {
type RDPCS_EXT_REFCLK_EN;\
type RDPCS_TX_FIFO_EN;\
type UNIPHY_LINK_ENABLE;\
+ type UNIPHY_CHANNEL0_XBAR_SOURCE;\
+ type UNIPHY_CHANNEL1_XBAR_SOURCE;\
+ type UNIPHY_CHANNEL2_XBAR_SOURCE;\
+ type UNIPHY_CHANNEL3_XBAR_SOURCE;\
type UNIPHY_CHANNEL0_INVERT;\
type UNIPHY_CHANNEL1_INVERT;\
type UNIPHY_CHANNEL2_INVERT;\
@@ -337,16 +355,46 @@ struct dcn10_link_enc_registers {
type RDPCS_TX_FIFO_ERROR_MASK;\
type RDPCS_DPALT_DISABLE_TOGGLE_MASK;\
type RDPCS_DPALT_4LANE_TOGGLE_MASK;\
+ type RDPCS_PHY_DPALT_DP4;\
type RDPCS_PHY_DPALT_DISABLE;\
type RDPCS_PHY_DPALT_DISABLE_ACK;\
type RDPCS_PHY_DP_MPLLB_V2I;\
type RDPCS_PHY_DP_MPLLB_FREQ_VCO;\
+ type RDPCS_PHY_DP_MPLLB_CP_INT_GS;\
+ type RDPCS_PHY_RX_VREF_CTRL;\
type RDPCS_PHY_DP_MPLLB_CP_INT;\
type RDPCS_PHY_DP_MPLLB_CP_PROP;\
type RDPCS_PHY_RX_REF_LD_VAL;\
type RDPCS_PHY_RX_VCO_LD_VAL;\
type DPCSTX_DEBUG_CONFIG; \
- type RDPCSTX_DEBUG_CONFIG
+ type RDPCSTX_DEBUG_CONFIG; \
+ type RDPCS_PHY_DP_TX0_EQ_MAIN;\
+ type RDPCS_PHY_DP_TX0_EQ_PRE;\
+ type RDPCS_PHY_DP_TX0_EQ_POST;\
+ type RDPCS_PHY_DP_TX1_EQ_MAIN;\
+ type RDPCS_PHY_DP_TX1_EQ_PRE;\
+ type RDPCS_PHY_DP_TX1_EQ_POST;\
+ type RDPCS_PHY_DP_TX2_EQ_MAIN;\
+ type RDPCS_PHY_DP_MPLLB_CP_PROP_GS;\
+ type RDPCS_PHY_DP_TX2_EQ_PRE;\
+ type RDPCS_PHY_DP_TX2_EQ_POST;\
+ type RDPCS_PHY_DP_TX3_EQ_MAIN;\
+ type RDPCS_PHY_DCO_RANGE;\
+ type RDPCS_PHY_DCO_FINETUNE;\
+ type RDPCS_PHY_DP_TX3_EQ_PRE;\
+ type RDPCS_PHY_DP_TX3_EQ_POST;\
+ type RDPCS_PHY_SUP_PRE_HP;\
+ type RDPCS_PHY_DP_TX0_VREGDRV_BYP;\
+ type RDPCS_PHY_DP_TX1_VREGDRV_BYP;\
+ type RDPCS_PHY_DP_TX2_VREGDRV_BYP;\
+ type RDPCS_PHY_DP_TX3_VREGDRV_BYP;\
+ type RDPCS_DMCU_DPALT_DIS_BLOCK_REG;\
+ type UNIPHYA_SOFT_RESET;\
+ type UNIPHYB_SOFT_RESET;\
+ type UNIPHYC_SOFT_RESET;\
+ type UNIPHYD_SOFT_RESET;\
+ type UNIPHYE_SOFT_RESET;\
+ type UNIPHYF_SOFT_RESET
#define DCN20_LINK_ENCODER_REG_FIELD_LIST(type) \
type DIG_LANE0EN;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index e9ebbbe256b4..0a9ad692f541 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -168,7 +168,10 @@ static void opp1_set_pixel_encoding(
REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0);
break;
case PIXEL_ENCODING_YCBCR422:
- REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 1);
+ REG_UPDATE_3(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 1,
+ FMT_SUBSAMPLING_MODE, 2,
+ FMT_CBCR_BIT_REDUCTION_BYPASS, 0);
break;
case PIXEL_ENCODING_YCBCR420:
REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2);
@@ -237,6 +240,9 @@ void opp1_set_dyn_expansion(
FMT_DYNAMIC_EXP_EN, 0,
FMT_DYNAMIC_EXP_MODE, 0);
+ if (opp->dyn_expansion == DYN_EXPANSION_DISABLE)
+ return;
+
/*00 - 10-bit -> 12-bit dynamic expansion*/
/*01 - 8-bit -> 12-bit dynamic expansion*/
if (signal == SIGNAL_TYPE_HDMI_TYPE_A ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
index 0f10adea000c..2c0ecfa5a643 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
@@ -116,6 +116,8 @@
type FMT_RAND_G_SEED; \
type FMT_RAND_B_SEED; \
type FMT_PIXEL_ENCODING; \
+ type FMT_SUBSAMPLING_MODE; \
+ type FMT_CBCR_BIT_REDUCTION_BYPASS; \
type FMT_CLAMP_DATA_EN; \
type FMT_CLAMP_COLOR_FORMAT; \
type FMT_DYNAMIC_EXP_EN; \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index e74a07d03fde..dabccbd49ad4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -1230,59 +1230,25 @@ bool optc1_is_stereo_left_eye(struct timing_generator *optc)
return ret;
}
-bool optc1_is_matching_timing(struct timing_generator *tg,
- const struct dc_crtc_timing *otg_timing)
+bool optc1_get_hw_timing(struct timing_generator *tg,
+ struct dc_crtc_timing *hw_crtc_timing)
{
- struct dc_crtc_timing hw_crtc_timing = {0};
struct dcn_otg_state s = {0};
- if (tg == NULL || otg_timing == NULL)
+ if (tg == NULL || hw_crtc_timing == NULL)
return false;
optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
- hw_crtc_timing.h_total = s.h_total + 1;
- hw_crtc_timing.h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end);
- hw_crtc_timing.h_front_porch = s.h_total + 1 - s.h_blank_start;
- hw_crtc_timing.h_sync_width = s.h_sync_a_end - s.h_sync_a_start;
+ hw_crtc_timing->h_total = s.h_total + 1;
+ hw_crtc_timing->h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end);
+ hw_crtc_timing->h_front_porch = s.h_total + 1 - s.h_blank_start;
+ hw_crtc_timing->h_sync_width = s.h_sync_a_end - s.h_sync_a_start;
- hw_crtc_timing.v_total = s.v_total + 1;
- hw_crtc_timing.v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end);
- hw_crtc_timing.v_front_porch = s.v_total + 1 - s.v_blank_start;
- hw_crtc_timing.v_sync_width = s.v_sync_a_end - s.v_sync_a_start;
-
- if (otg_timing->h_total != hw_crtc_timing.h_total)
- return false;
-
- if (otg_timing->h_border_left != hw_crtc_timing.h_border_left)
- return false;
-
- if (otg_timing->h_addressable != hw_crtc_timing.h_addressable)
- return false;
-
- if (otg_timing->h_border_right != hw_crtc_timing.h_border_right)
- return false;
-
- if (otg_timing->h_front_porch != hw_crtc_timing.h_front_porch)
- return false;
-
- if (otg_timing->h_sync_width != hw_crtc_timing.h_sync_width)
- return false;
-
- if (otg_timing->v_total != hw_crtc_timing.v_total)
- return false;
-
- if (otg_timing->v_border_top != hw_crtc_timing.v_border_top)
- return false;
-
- if (otg_timing->v_addressable != hw_crtc_timing.v_addressable)
- return false;
-
- if (otg_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
- return false;
-
- if (otg_timing->v_sync_width != hw_crtc_timing.v_sync_width)
- return false;
+ hw_crtc_timing->v_total = s.v_total + 1;
+ hw_crtc_timing->v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end);
+ hw_crtc_timing->v_front_porch = s.v_total + 1 - s.v_blank_start;
+ hw_crtc_timing->v_sync_width = s.v_sync_a_end - s.v_sync_a_start;
return true;
}
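Since the timing comparison was dropped from the OPTC, a caller can now read the hardware timing back and compare only the fields it cares about. A hedged sketch (the helper name and field subset are illustrative, not from this patch):

    static bool timing_matches_hw_example(struct timing_generator *tg,
                                          const struct dc_crtc_timing *t)
    {
            struct dc_crtc_timing hw = {0};

            if (!tg->funcs->get_hw_timing(tg, &hw))
                    return false;
            /* compare only the fields of interest */
            return t->h_total == hw.h_total && t->v_total == hw.v_total;
    }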
@@ -1486,7 +1452,6 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.get_frame_count = optc1_get_vblank_counter,
.get_scanoutpos = optc1_get_crtc_scanoutpos,
.get_otg_active_size = optc1_get_otg_active_size,
- .is_matching_timing = optc1_is_matching_timing,
.set_early_control = optc1_set_early_control,
/* used by enable_timing_synchronization. Not needed for FPGA */
.wait_for_state = optc1_wait_for_state,
@@ -1514,7 +1479,8 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.configure_crc = optc1_configure_crc,
.set_vtg_params = optc1_set_vtg_params,
.program_manual_trigger = optc1_program_manual_trigger,
- .setup_manual_trigger = optc1_setup_manual_trigger
+ .setup_manual_trigger = optc1_setup_manual_trigger,
+ .get_hw_timing = optc1_get_hw_timing,
};
void dcn10_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 83575599672e..c8d795b335ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -547,9 +547,8 @@ struct dcn_otg_state {
void optc1_read_otg_state(struct optc *optc1,
struct dcn_otg_state *s);
-bool optc1_is_matching_timing(
- struct timing_generator *tg,
- const struct dc_crtc_timing *otg_timing);
+bool optc1_get_hw_timing(struct timing_generator *tg,
+ struct dc_crtc_timing *hw_crtc_timing);
bool optc1_validate_timing(
struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 1599bb971111..e3b279ff0d21 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -319,6 +319,14 @@ static const struct dcn10_link_enc_mask le_mask = {
LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN10_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN10_AUX_MASK_SH_LIST(_MASK)
+};
+
#define ipp_regs(id)\
[id] = {\
IPP_REG_LIST_DCN10(id),\
@@ -642,7 +650,10 @@ struct dce_aux *dcn10_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -1308,6 +1319,8 @@ static bool construct(
dc->caps.max_slave_planes = 1;
dc->caps.is_apu = true;
dc->caps.post_blend_color_processing = false;
+ dc->caps.extended_aux_timeout_support = false;
+
/* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
dc->caps.force_dp_tps4_for_cp2520 = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 9aa258f3550b..06e5bbb4545c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -1553,6 +1553,66 @@ unsigned int enc1_dig_source_otg(
return tg_inst;
}
+bool enc1_stream_encoder_dp_get_pixel_format(
+ struct stream_encoder *enc,
+ enum dc_pixel_encoding *encoding,
+ enum dc_color_depth *depth)
+{
+ uint32_t hw_encoding = 0;
+ uint32_t hw_depth = 0;
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (enc == NULL ||
+ encoding == NULL ||
+ depth == NULL)
+ return false;
+
+ REG_GET_2(DP_PIXEL_FORMAT,
+ DP_PIXEL_ENCODING, &hw_encoding,
+ DP_COMPONENT_DEPTH, &hw_depth);
+
+ switch (hw_depth) {
+ case DP_COMPONENT_PIXEL_DEPTH_6BPC:
+ *depth = COLOR_DEPTH_666;
+ break;
+ case DP_COMPONENT_PIXEL_DEPTH_8BPC:
+ *depth = COLOR_DEPTH_888;
+ break;
+ case DP_COMPONENT_PIXEL_DEPTH_10BPC:
+ *depth = COLOR_DEPTH_101010;
+ break;
+ case DP_COMPONENT_PIXEL_DEPTH_12BPC:
+ *depth = COLOR_DEPTH_121212;
+ break;
+ case DP_COMPONENT_PIXEL_DEPTH_16BPC:
+ *depth = COLOR_DEPTH_161616;
+ break;
+ default:
+ *depth = COLOR_DEPTH_UNDEFINED;
+ break;
+ }
+
+ switch (hw_encoding) {
+ case DP_PIXEL_ENCODING_TYPE_RGB444:
+ *encoding = PIXEL_ENCODING_RGB;
+ break;
+ case DP_PIXEL_ENCODING_TYPE_YCBCR422:
+ *encoding = PIXEL_ENCODING_YCBCR422;
+ break;
+ case DP_PIXEL_ENCODING_TYPE_YCBCR444:
+ case DP_PIXEL_ENCODING_TYPE_Y_ONLY:
+ *encoding = PIXEL_ENCODING_YCBCR444;
+ break;
+ case DP_PIXEL_ENCODING_TYPE_YCBCR420:
+ *encoding = PIXEL_ENCODING_YCBCR420;
+ break;
+ default:
+ *encoding = PIXEL_ENCODING_UNDEFINED;
+ break;
+ }
+ return true;
+}
+
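A short usage sketch for the new readback hook; the local variable names are invented:

    enum dc_pixel_encoding hw_encoding;
    enum dc_color_depth hw_depth;

    if (enc->funcs->dp_get_pixel_format &&
        enc->funcs->dp_get_pixel_format(enc, &hw_encoding, &hw_depth)) {
            /* compare hw_encoding/hw_depth against what the driver programmed */
    }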
static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
.dp_set_stream_attribute =
enc1_stream_encoder_dp_set_stream_attribute,
@@ -1589,6 +1649,8 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
.dig_connect_to_otg = enc1_dig_connect_to_otg,
.hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
.dig_source_otg = enc1_dig_source_otg,
+
+ .dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format,
};
void dcn10_stream_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index a512cbea00d1..c9cbc21d121e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -621,4 +621,9 @@ void get_audio_clock_info(
void enc1_reset_hdmi_stream_attribute(
struct stream_encoder *enc);
+bool enc1_stream_encoder_dp_get_pixel_format(
+ struct stream_encoder *enc,
+ enum dc_pixel_encoding *encoding,
+ enum dc_color_depth *depth);
+
#endif /* __DC_STREAM_ENCODER_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
index 16476ed25536..1e1151356e60 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -44,16 +44,12 @@
#define DC_LOGGER \
dccg->ctx->logger
-void dccg2_update_dpp_dto(struct dccg *dccg,
- int dpp_inst,
- int req_dppclk,
- bool reduce_divider_only)
+void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
if (dccg->ref_dppclk && req_dppclk) {
int ref_dppclk = dccg->ref_dppclk;
- int current_phase, current_modulo;
ASSERT(req_dppclk <= ref_dppclk);
/* need to clamp to 8 bits */
@@ -65,28 +61,9 @@ void dccg2_update_dpp_dto(struct dccg *dccg,
if (req_dppclk > ref_dppclk)
req_dppclk = ref_dppclk;
}
-
- REG_GET_2(DPPCLK_DTO_PARAM[dpp_inst],
- DPPCLK0_DTO_PHASE, &current_phase,
- DPPCLK0_DTO_MODULO, &current_modulo);
-
- if (reduce_divider_only) {
- // requested phase/modulo greater than current
- if (req_dppclk * current_modulo >= current_phase * ref_dppclk) {
- REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
- DPPCLK0_DTO_PHASE, req_dppclk,
- DPPCLK0_DTO_MODULO, ref_dppclk);
- } else {
- REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
- DPPCLK0_DTO_PHASE, current_phase,
- DPPCLK0_DTO_MODULO, current_modulo);
- }
- } else {
- REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
- DPPCLK0_DTO_PHASE, req_dppclk,
- DPPCLK0_DTO_MODULO, ref_dppclk);
- }
-
+ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
+ DPPCLK0_DTO_PHASE, req_dppclk,
+ DPPCLK0_DTO_MODULO, ref_dppclk);
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 1);
} else {
@@ -119,32 +96,6 @@ void dccg2_get_dccg_ref_freq(struct dccg *dccg,
void dccg2_init(struct dccg *dccg)
{
- struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
-
- // Fallthrough intentional to program all available dpp_dto's
- switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) {
- case 6:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1);
- /* Fall through */
- case 5:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1);
- /* Fall through */
- case 4:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1);
- /* Fall through */
- case 3:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1);
- /* Fall through */
- case 2:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1);
- /* Fall through */
- case 1:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1);
- break;
- default:
- ASSERT(false);
- break;
- }
}
static const struct dccg_funcs dccg2_funcs = {
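The simplified path always programs phase = req_dppclk and modulo = ref_dppclk, and a DTO outputs ref * phase / modulo, so the requested rate is hit exactly. A worked sketch with invented numbers:

    /* Invented values: ref_dppclk = 600000 kHz, req_dppclk = 400000 kHz. */
    static int dto_output_khz(int ref_dppclk, int phase, int modulo)
    {
            return (int)(((long long)ref_dppclk * phase) / modulo);
    }
    /* dto_output_khz(600000, 400000, 600000) == 400000 kHz */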
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
index 74a074a873cd..2205cb0204e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
@@ -97,7 +97,7 @@ struct dcn_dccg {
const struct dccg_mask *dccg_mask;
};
-void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk, bool raise_divider_only);
+void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk);
void dccg2_get_dccg_ref_freq(struct dccg *dccg,
unsigned int xtalin_freq_inKhz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index 2f5aade1e882..4d7e45892f08 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -376,13 +376,6 @@ bool dpp2_get_optimal_number_of_taps(
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
{
- uint32_t pixel_width;
-
- if (scl_data->viewport.width > scl_data->recout.width)
- pixel_width = scl_data->recout.width;
- else
- pixel_width = scl_data->viewport.width;
-
/* Some ASICs do not support FP16 scaling, so we reject modes that require it */
if (scl_data->viewport.width != scl_data->h_active &&
scl_data->viewport.height != scl_data->v_active &&
@@ -464,7 +457,7 @@ static struct dpp_funcs dcn20_dpp_funcs = {
.dpp_read_state = dpp20_read_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
- .dpp_get_optimal_number_of_taps = dpp2_get_optimal_number_of_taps,
+ .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps,
.dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
.dpp_set_csc_adjustment = NULL,
.dpp_set_csc_default = NULL,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
index 290b2854bd2c..5b03b737b1d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
@@ -30,16 +30,20 @@
#define TO_DCN20_DPP(dpp)\
container_of(dpp, struct dcn20_dpp, base)
-#define TF_REG_LIST_DCN20(id) \
- TF_REG_LIST_DCN(id), \
+#define TF_REG_LIST_DCN20_COMMON_UPDATED(id) \
SRI(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id)
+
+#define TF_REG_LIST_DCN20_COMMON(id) \
SRI(CM_BLNDGAM_CONTROL, CM, id), \
SRI(CM_BLNDGAM_RAMB_START_CNTL_B, CM, id), \
SRI(CM_BLNDGAM_RAMB_START_CNTL_G, CM, id), \
SRI(CM_BLNDGAM_RAMB_START_CNTL_R, CM, id), \
- SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \
- SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \
- SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \
SRI(CM_BLNDGAM_RAMB_END_CNTL1_B, CM, id), \
SRI(CM_BLNDGAM_RAMB_END_CNTL2_B, CM, id), \
SRI(CM_BLNDGAM_RAMB_END_CNTL1_G, CM, id), \
@@ -66,9 +70,6 @@
SRI(CM_BLNDGAM_RAMA_START_CNTL_B, CM, id), \
SRI(CM_BLNDGAM_RAMA_START_CNTL_G, CM, id), \
SRI(CM_BLNDGAM_RAMA_START_CNTL_R, CM, id), \
- SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \
- SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \
- SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id), \
SRI(CM_BLNDGAM_RAMA_END_CNTL1_B, CM, id), \
SRI(CM_BLNDGAM_RAMA_END_CNTL2_B, CM, id), \
SRI(CM_BLNDGAM_RAMA_END_CNTL1_G, CM, id), \
@@ -147,7 +148,12 @@
SRI(CM_SHAPER_RAMA_REGION_28_29, CM, id), \
SRI(CM_SHAPER_RAMA_REGION_30_31, CM, id), \
SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \
- SRI(CM_SHAPER_LUT_INDEX, CM, id), \
+ SRI(CM_SHAPER_LUT_INDEX, CM, id)
+
+#define TF_REG_LIST_DCN20(id) \
+ TF_REG_LIST_DCN(id), \
+ TF_REG_LIST_DCN20_COMMON(id), \
+ TF_REG_LIST_DCN20_COMMON_UPDATED(id), \
SRI(CURSOR_CONTROL, CURSOR0_, id), \
SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \
SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \
@@ -166,27 +172,41 @@
SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\
SRI(DSCL_MEM_PWR_CTRL, DSCL, id)
-#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\
- TF_REG_LIST_SH_MASK_DCN(mask_sh), \
+
+#define TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh)\
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_LUT_MODE, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh)
+
+
+#define TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh)\
+ TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \
@@ -261,18 +281,9 @@
TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \
@@ -341,9 +352,6 @@
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_LUT_INDEX, CM_BLNDGAM_LUT_INDEX, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_LUT_DATA, CM_BLNDGAM_LUT_DATA, mask_sh), \
TF_SF(CM0_CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, mask_sh), \
@@ -356,7 +364,6 @@
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, mask_sh), \
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_RAM_SEL, mask_sh), \
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_30BIT_EN, mask_sh), \
- TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_READ_SEL, mask_sh), \
TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh), \
TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_START_B, mask_sh), \
@@ -521,9 +528,14 @@
TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_EN_MASK, mask_sh), \
TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_SEL, mask_sh), \
- TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_SHAPER_LUT_INDEX, CM_SHAPER_LUT_INDEX, mask_sh), \
- TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh)
+
+
+#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\
+ TF_REG_LIST_SH_MASK_DCN(mask_sh), \
+ TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \
+ TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh), \
TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \
TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
@@ -560,6 +572,7 @@
TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\
TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh)
+
#define TF_REG_FIELD_LIST_DCN2_0(type) \
TF_REG_FIELD_LIST(type) \
type CM_BLNDGAM_LUT_DATA; \
@@ -593,6 +606,7 @@
type OBUF_MEM_PWR_FORCE;\
type LUT_MEM_PWR_FORCE
+
struct dcn2_dpp_shift {
TF_REG_FIELD_LIST_DCN2_0(uint8_t);
};
@@ -691,11 +705,6 @@ void dpp2_set_hdr_multiplier(
struct dpp *dpp_base,
uint32_t multiplier);
-bool dpp2_get_optimal_number_of_taps(
- struct dpp *dpp,
- struct scaler_data *scl_data,
- const struct scaling_taps *in_taps);
-
bool dpp2_construct(struct dcn20_dpp *dpp2,
struct dc_context *ctx,
uint32_t inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 1b419407af94..63eb377ed9c0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -118,7 +118,7 @@ static void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock
dsc_enc_caps->color_formats.bits.RGB = 1;
dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
- dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 0;
+ dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
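With simple 4:2:2 now advertised, a validation path can key off the capability bit; a one-line sketch assuming the dsc_enc_caps layout used above:

    static bool dsc_supports_simple_422(const struct dsc_enc_caps *caps)
    {
            return caps->color_formats.bits.YCBCR_SIMPLE_422 != 0;
    }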
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
index cd8bc92ce3ba..880954ac0b02 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
@@ -722,7 +722,6 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20,
struct scaling_taps num_taps)
{
uint32_t h_ratio_luma = 1;
- uint32_t h_ratio_chroma = 1;
uint32_t h_taps_luma = num_taps.h_taps;
uint32_t h_taps_chroma = num_taps.h_taps_c;
int32_t h_init_phase_luma = 0;
@@ -747,7 +746,6 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20,
h_ratio_luma = -1;
else
h_ratio_luma = dc_fixpt_u3d19(tmp_h_ratio_luma) << 5;
- h_ratio_chroma = h_ratio_luma * 2;
/*Program ratio*/
REG_UPDATE(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL_H_SCALE_RATIO, h_ratio_luma);
@@ -803,7 +801,6 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20,
enum dwb_subsample_position subsample_position)
{
uint32_t v_ratio_luma = 1;
- uint32_t v_ratio_chroma = 1;
uint32_t v_taps_luma = num_taps.v_taps;
uint32_t v_taps_chroma = num_taps.v_taps_c;
int32_t v_init_phase_luma = 0;
@@ -827,7 +824,6 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20,
v_ratio_luma = -1;
else
v_ratio_luma = dc_fixpt_u3d19(tmp_v_ratio_luma) << 5;
- v_ratio_chroma = v_ratio_luma * 2;
/*Program ratio*/
REG_UPDATE(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL_V_SCALE_RATIO, v_ratio_luma);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index b83c022e2c6f..8b8438566101 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -186,14 +186,13 @@ static void hubbub2_get_blk256_size(unsigned int *blk256_width, unsigned int *bl
}
static void hubbub2_det_request_size(
+ unsigned int detile_buf_size,
unsigned int height,
unsigned int width,
unsigned int bpe,
bool *req128_horz_wc,
bool *req128_vert_wc)
{
- unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
-
unsigned int blk256_height = 0;
unsigned int blk256_width = 0;
unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
@@ -236,7 +235,8 @@ bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub,
&segment_order_horz, &segment_order_vert))
return false;
- hubbub2_det_request_size(input->surface_size.height, input->surface_size.width,
+ hubbub2_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size,
+ input->surface_size.height, input->surface_size.width,
bpe, &req128_horz_wc, &req128_vert_wc);
if (!req128_horz_wc && !req128_vert_wc) {
@@ -588,7 +588,7 @@ static void hubbub2_program_watermarks(
DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 180);
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ hubbub->funcs->allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
}
static const struct hubbub_funcs hubbub2_funcs = {
@@ -600,7 +600,8 @@ static const struct hubbub_funcs hubbub2_funcs = {
.get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
.wm_read_state = hubbub2_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
- .program_watermarks = hubbub2_program_watermarks
+ .program_watermarks = hubbub2_program_watermarks,
+ .allow_self_refresh_control = hubbub1_allow_self_refresh_control
};
void hubbub2_construct(struct dcn20_hubbub *hubbub,
@@ -618,4 +619,5 @@ void hubbub2_construct(struct dcn20_hubbub *hubbub,
hubbub->masks = hubbub_mask;
hubbub->debug_test_index_pstate = 0xB;
+ hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
index 626117d3b4e9..501532dd523a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
@@ -81,6 +81,7 @@ struct dcn20_hubbub {
unsigned int debug_test_index_pstate;
struct dcn_watermark_set watermarks;
struct dcn20_vmid vmid[16];
+ unsigned int detile_buf_size;
};
void hubbub2_construct(struct dcn20_hubbub *hubbub,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 1212da12c414..6229a8ca0013 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -688,7 +688,7 @@ bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
return true;
}
-static bool dcn20_set_blend_lut(
+bool dcn20_set_blend_lut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
@@ -710,7 +710,7 @@ static bool dcn20_set_blend_lut(
return result;
}
-static bool dcn20_set_shaper_3dlut(
+bool dcn20_set_shaper_3dlut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
@@ -999,72 +999,6 @@ void dcn20_enable_plane(
}
-static void dcn20_program_pipe(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context)
-{
- pipe_ctx->plane_state->update_flags.bits.full_update =
- context->commit_hints.full_update_needed ? 1 : pipe_ctx->plane_state->update_flags.bits.full_update;
-
- if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dcn20_enable_plane(dc, pipe_ctx, context);
-
- update_dchubp_dpp(dc, pipe_ctx, context);
-
- set_hdr_multiplier(pipe_ctx);
-
- if (pipe_ctx->plane_state->update_flags.bits.full_update ||
- pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
- pipe_ctx->plane_state->update_flags.bits.gamma_change)
- dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
-
- /* dcn10_translate_regamma_to_hw_format takes 750us to finish
- * only do gamma programming for full update.
- * TODO: This can be further optimized/cleaned up
- * Always call this for now since it does memcmp inside before
- * doing heavy calculation and programming
- */
- if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
-}
-
-static void dcn20_program_all_pipe_in_tree(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context)
-{
- if (pipe_ctx->top_pipe == NULL && !pipe_ctx->prev_odm_pipe) {
- bool blank = !is_pipe_tree_visible(pipe_ctx);
-
- pipe_ctx->stream_res.tg->funcs->program_global_sync(
- pipe_ctx->stream_res.tg,
- pipe_ctx->pipe_dlg_param.vready_offset,
- pipe_ctx->pipe_dlg_param.vstartup_start,
- pipe_ctx->pipe_dlg_param.vupdate_offset,
- pipe_ctx->pipe_dlg_param.vupdate_width);
-
- pipe_ctx->stream_res.tg->funcs->set_vtg_params(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
-
- dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
-
- if (dc->hwss.update_odm)
- dc->hwss.update_odm(dc, context, pipe_ctx);
- }
-
- if (pipe_ctx->plane_state != NULL)
- dcn20_program_pipe(dc, pipe_ctx, context);
-
- if (pipe_ctx->bottom_pipe != NULL) {
- ASSERT(pipe_ctx->bottom_pipe != pipe_ctx);
- dcn20_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
- } else if (pipe_ctx->next_odm_pipe != NULL) {
- ASSERT(pipe_ctx->next_odm_pipe != pipe_ctx);
- dcn20_program_all_pipe_in_tree(dc, pipe_ctx->next_odm_pipe, context);
- }
-}
-
void dcn20_pipe_control_lock_global(
struct dc *dc,
struct pipe_ctx *pipe,
@@ -1124,114 +1058,462 @@ void dcn20_pipe_control_lock(
}
}
-static void dcn20_apply_ctx_for_surface(
- struct dc *dc,
- const struct dc_stream_state *stream,
- int num_planes,
- struct dc_state *context)
+static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe)
{
- const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
- int i;
- struct timing_generator *tg;
- bool removed_pipe[6] = { false };
- bool interdependent_update = false;
- struct pipe_ctx *top_pipe_to_program =
- find_top_pipe_for_stream(dc, context, stream);
- struct pipe_ctx *prev_top_pipe_to_program =
- find_top_pipe_for_stream(dc, dc->current_state, stream);
- DC_LOGGER_INIT(dc->ctx->logger);
+ new_pipe->update_flags.raw = 0;
- if (!top_pipe_to_program)
+ /* Exit on unchanged, unused pipe */
+ if (!old_pipe->plane_state && !new_pipe->plane_state)
return;
+ /* Detect pipe enable/disable */
+ if (!old_pipe->plane_state && new_pipe->plane_state) {
+ new_pipe->update_flags.bits.enable = 1;
+ new_pipe->update_flags.bits.mpcc = 1;
+ new_pipe->update_flags.bits.dppclk = 1;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ new_pipe->update_flags.bits.gamut_remap = 1;
+ new_pipe->update_flags.bits.scaler = 1;
+ new_pipe->update_flags.bits.viewport = 1;
+ if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
+ new_pipe->update_flags.bits.odm = 1;
+ new_pipe->update_flags.bits.global_sync = 1;
+ }
+ return;
+ }
+ if (old_pipe->plane_state && !new_pipe->plane_state) {
+ new_pipe->update_flags.bits.disable = 1;
+ return;
+ }
- /* Carry over GSL groups in case the context is changing. */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe_ctx =
- &dc->current_state->res_ctx.pipe_ctx[i];
+ /* Detect top-pipe-only changes */
+ if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
+ /* Detect odm changes */
+ if ((old_pipe->next_odm_pipe && new_pipe->next_odm_pipe
+ && old_pipe->next_odm_pipe->pipe_idx != new_pipe->next_odm_pipe->pipe_idx)
+ || (!old_pipe->next_odm_pipe && new_pipe->next_odm_pipe)
+ || (old_pipe->next_odm_pipe && !new_pipe->next_odm_pipe)
+ || old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.odm = 1;
+
+ /* Detect global sync changes */
+ if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
+ || old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
+ || old_pipe->pipe_dlg_param.vupdate_offset != new_pipe->pipe_dlg_param.vupdate_offset
+ || old_pipe->pipe_dlg_param.vupdate_width != new_pipe->pipe_dlg_param.vupdate_width)
+ new_pipe->update_flags.bits.global_sync = 1;
+ }
- if (pipe_ctx->stream == stream &&
- pipe_ctx->stream == old_pipe_ctx->stream)
- pipe_ctx->stream_res.gsl_group =
- old_pipe_ctx->stream_res.gsl_group;
+ /*
+ * Detect opp / tg change; only set on change, not on enable.
+ * Assume mpcc inst == pipe index; if not, this code needs to be
+ * updated, since mpcc is what is affected by these flags. In fact,
+ * the whole sequence currently makes this assumption, with how hubp
+ * reset is matched to the same-index mpcc reset.
+ */
+ if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.opp_changed = 1;
+ if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
+ new_pipe->update_flags.bits.tg_changed = 1;
+
+ /* Detect mpcc blending changes, only dpp inst and bot matter here */
+ if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
+ || old_pipe->stream_res.opp != new_pipe->stream_res.opp
+ || (!old_pipe->bottom_pipe && new_pipe->bottom_pipe)
+ || (old_pipe->bottom_pipe && !new_pipe->bottom_pipe)
+ || (old_pipe->bottom_pipe && new_pipe->bottom_pipe
+ && old_pipe->bottom_pipe->plane_res.mpcc_inst
+ != new_pipe->bottom_pipe->plane_res.mpcc_inst))
+ new_pipe->update_flags.bits.mpcc = 1;
+
+ /* Detect dppclk change */
+ if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
+ new_pipe->update_flags.bits.dppclk = 1;
+
+ /* Check for scl update */
+ if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
+ new_pipe->update_flags.bits.scaler = 1;
+ /* Check for vp update */
+ if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
+ || memcmp(&old_pipe->plane_res.scl_data.viewport_c,
+ &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
+ new_pipe->update_flags.bits.viewport = 1;
+
+ /* Detect dlg/ttu/rq updates */
+ {
+ struct _vcs_dpi_display_dlg_regs_st old_dlg_attr = old_pipe->dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st old_ttu_attr = old_pipe->ttu_regs;
+ struct _vcs_dpi_display_dlg_regs_st *new_dlg_attr = &new_pipe->dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *new_ttu_attr = &new_pipe->ttu_regs;
+
+ /* Detect pipe interdependent updates */
+ if (old_dlg_attr.dst_y_prefetch != new_dlg_attr->dst_y_prefetch ||
+ old_dlg_attr.vratio_prefetch != new_dlg_attr->vratio_prefetch ||
+ old_dlg_attr.vratio_prefetch_c != new_dlg_attr->vratio_prefetch_c ||
+ old_dlg_attr.dst_y_per_vm_vblank != new_dlg_attr->dst_y_per_vm_vblank ||
+ old_dlg_attr.dst_y_per_row_vblank != new_dlg_attr->dst_y_per_row_vblank ||
+ old_dlg_attr.dst_y_per_vm_flip != new_dlg_attr->dst_y_per_vm_flip ||
+ old_dlg_attr.dst_y_per_row_flip != new_dlg_attr->dst_y_per_row_flip ||
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_l != new_dlg_attr->refcyc_per_meta_chunk_vblank_l ||
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_c != new_dlg_attr->refcyc_per_meta_chunk_vblank_c ||
+ old_dlg_attr.refcyc_per_meta_chunk_flip_l != new_dlg_attr->refcyc_per_meta_chunk_flip_l ||
+ old_dlg_attr.refcyc_per_line_delivery_pre_l != new_dlg_attr->refcyc_per_line_delivery_pre_l ||
+ old_dlg_attr.refcyc_per_line_delivery_pre_c != new_dlg_attr->refcyc_per_line_delivery_pre_c ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_l != new_ttu_attr->refcyc_per_req_delivery_pre_l ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_c != new_ttu_attr->refcyc_per_req_delivery_pre_c ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur0 != new_ttu_attr->refcyc_per_req_delivery_pre_cur0 ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur1 != new_ttu_attr->refcyc_per_req_delivery_pre_cur1 ||
+ old_ttu_attr.min_ttu_vblank != new_ttu_attr->min_ttu_vblank ||
+ old_ttu_attr.qos_level_flip != new_ttu_attr->qos_level_flip) {
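+ /* Copy the interdependent fields into the old snapshot so the
+ * memcmp below only flags the remaining (non-interdependent)
+ * dlg/ttu/rq differences.
+ */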
+ old_dlg_attr.dst_y_prefetch = new_dlg_attr->dst_y_prefetch;
+ old_dlg_attr.vratio_prefetch = new_dlg_attr->vratio_prefetch;
+ old_dlg_attr.vratio_prefetch_c = new_dlg_attr->vratio_prefetch_c;
+ old_dlg_attr.dst_y_per_vm_vblank = new_dlg_attr->dst_y_per_vm_vblank;
+ old_dlg_attr.dst_y_per_row_vblank = new_dlg_attr->dst_y_per_row_vblank;
+ old_dlg_attr.dst_y_per_vm_flip = new_dlg_attr->dst_y_per_vm_flip;
+ old_dlg_attr.dst_y_per_row_flip = new_dlg_attr->dst_y_per_row_flip;
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_l = new_dlg_attr->refcyc_per_meta_chunk_vblank_l;
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_c = new_dlg_attr->refcyc_per_meta_chunk_vblank_c;
+ old_dlg_attr.refcyc_per_meta_chunk_flip_l = new_dlg_attr->refcyc_per_meta_chunk_flip_l;
+ old_dlg_attr.refcyc_per_line_delivery_pre_l = new_dlg_attr->refcyc_per_line_delivery_pre_l;
+ old_dlg_attr.refcyc_per_line_delivery_pre_c = new_dlg_attr->refcyc_per_line_delivery_pre_c;
+ old_ttu_attr.refcyc_per_req_delivery_pre_l = new_ttu_attr->refcyc_per_req_delivery_pre_l;
+ old_ttu_attr.refcyc_per_req_delivery_pre_c = new_ttu_attr->refcyc_per_req_delivery_pre_c;
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur0 = new_ttu_attr->refcyc_per_req_delivery_pre_cur0;
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur1 = new_ttu_attr->refcyc_per_req_delivery_pre_cur1;
+ old_ttu_attr.min_ttu_vblank = new_ttu_attr->min_ttu_vblank;
+ old_ttu_attr.qos_level_flip = new_ttu_attr->qos_level_flip;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ }
+ /* Detect any other updates to ttu/rq/dlg */
+ if (memcmp(&old_dlg_attr, &new_pipe->dlg_regs, sizeof(old_dlg_attr)) ||
+ memcmp(&old_ttu_attr, &new_pipe->ttu_regs, sizeof(old_ttu_attr)) ||
+ memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs)))
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
}
+}
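+
+/* The update_flags computed above drive dcn20_update_dchubp_dpp() and
+ * dcn20_program_pipe() below; dcn20_program_front_end_for_ctx() fills
+ * them in for every pipe before any locking or programming happens.
+ */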
- tg = top_pipe_to_program->stream_res.tg;
+static void dcn20_update_dchubp_dpp(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
- interdependent_update = top_pipe_to_program->plane_state &&
- top_pipe_to_program->plane_state->update_flags.bits.full_update;
+ if (pipe_ctx->update_flags.bits.dppclk) {
+ dpp->funcs->dpp_dppclk_control(dpp, false, true);
- if (interdependent_update)
- lock_all_pipes(dc, context, true);
- else
- dcn20_pipe_control_lock(dc, top_pipe_to_program, true);
+ dc->res_pool->dccg->funcs->update_dpp_dto(
+ dc->res_pool->dccg,
+ dpp->inst,
+ pipe_ctx->plane_res.bw.dppclk_khz);
+ }
- if (num_planes == 0) {
- /* OTG blank before remove all front end */
- dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true);
+ /* TODO: Need an input parameter to tell which OTG the current DCHUB
+ * pipe is tied to. VTG is within DCHUBBUB, which is a common block
+ * shared by each pipe HUBP. VTG has a 1:1 mapping with OTG; each pipe
+ * HUBP selects which VTG to use.
+ */
+ if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
+ hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
+
+ hubp->funcs->hubp_setup(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs,
+ &pipe_ctx->rq_regs,
+ &pipe_ctx->pipe_dlg_param);
+ }
+ if (pipe_ctx->update_flags.bits.hubp_interdependent)
+ hubp->funcs->hubp_setup_interdependent(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs);
+
+ if (pipe_ctx->update_flags.bits.enable ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.input_csc_change ||
+ plane_state->update_flags.bits.color_space_change ||
+ plane_state->update_flags.bits.coeff_reduction_change) {
+ struct dc_bias_and_scale bns_params = {0};
+
+ // program the input csc
+ dpp->funcs->dpp_setup(dpp,
+ plane_state->format,
+ EXPANSION_MODE_ZERO,
+ plane_state->input_csc_color_matrix,
+ plane_state->color_space,
+ NULL);
+
+ if (dpp->funcs->dpp_program_bias_and_scale) {
+ //TODO: for CNVC, set scale and bias registers if necessary
+ dcn10_build_prescale_params(&bns_params, plane_state);
+ dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
+ }
}
- /* Disconnect unused mpcc */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe_ctx =
- &dc->current_state->res_ctx.pipe_ctx[i];
- /*
- * Powergate reused pipes that are not powergated
- * fairly hacky right now, using opp_id as indicator
- * TODO: After move dc_post to dc_update, this will
- * be removed.
- */
- if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
- if (old_pipe_ctx->stream_res.tg == tg &&
- old_pipe_ctx->plane_res.hubp &&
- old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID)
- dc->hwss.disable_plane(dc, old_pipe_ctx);
+ if (pipe_ctx->update_flags.bits.mpcc
+ || plane_state->update_flags.bits.global_alpha_change
+ || plane_state->update_flags.bits.per_pixel_alpha_change) {
+ /* Need mpcc to be idle if changing opp */
+ if (pipe_ctx->update_flags.bits.opp_changed) {
+ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
+ int mpcc_inst;
+
+ for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
+ if (!old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst])
+ continue;
+ dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
+ old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+ }
}
+ dc->hwss.update_mpcc(dc, pipe_ctx);
+ }
+
+ if (pipe_ctx->update_flags.bits.scaler ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.position_change ||
+ plane_state->update_flags.bits.per_pixel_alpha_change ||
+ pipe_ctx->stream->update_flags.bits.scaling) {
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
+ ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP);
+ /* scaler configuration */
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
+ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
+ }
- if ((!pipe_ctx->plane_state ||
- pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
- old_pipe_ctx->plane_state &&
- old_pipe_ctx->stream_res.tg == tg) {
+ if (pipe_ctx->update_flags.bits.viewport ||
+ (context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
+ (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling))
+ hubp->funcs->mem_program_viewport(
+ hubp,
+ &pipe_ctx->plane_res.scl_data.viewport,
+ &pipe_ctx->plane_res.scl_data.viewport_c);
+
+ /* Any updates are handled in the dc interface; just apply the existing state for plane enable */
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed)
+ && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ dc->hwss.set_cursor_position(pipe_ctx);
+ dc->hwss.set_cursor_attribute(pipe_ctx);
+
+ if (dc->hwss.set_cursor_sdr_white_level)
+ dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+ }
- dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
- removed_pipe[i] = true;
+ /* Any updates are handled in the dc interface; just apply the
+ * existing state for plane enable / opp change */
+ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
+ || pipe_ctx->stream->update_flags.bits.gamut_remap
+ || pipe_ctx->stream->update_flags.bits.out_csc) {
+ /* dpp/cm gamut remap */
+ dc->hwss.program_gamut_remap(pipe_ctx);
+
+ /* call the dcn2 method which uses mpc csc */
+ dc->hwss.program_output_csc(dc,
+ pipe_ctx,
+ pipe_ctx->stream->output_color_space,
+ pipe_ctx->stream->csc_color_matrix.matrix,
+ hubp->opp_id);
+ }
- DC_LOG_DC("Reset mpcc for pipe %d\n",
- old_pipe_ctx->pipe_idx);
- }
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.opp_changed ||
+ plane_state->update_flags.bits.pixel_format_change ||
+ plane_state->update_flags.bits.horizontal_mirror_change ||
+ plane_state->update_flags.bits.rotation_change ||
+ plane_state->update_flags.bits.swizzle_change ||
+ plane_state->update_flags.bits.dcc_change ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.plane_size_change) {
+ struct plane_size size = plane_state->plane_size;
+
+ size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
+ hubp->funcs->hubp_program_surface_config(
+ hubp,
+ plane_state->format,
+ &plane_state->tiling_info,
+ &size,
+ plane_state->rotation,
+ &plane_state->dcc,
+ plane_state->horizontal_mirror,
+ 0);
+ hubp->power_gated = false;
}
- if (num_planes > 0)
- dcn20_program_all_pipe_in_tree(dc, top_pipe_to_program, context);
+ if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update)
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
- /* Program secondary blending tree and writeback pipes */
- if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree))
- dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context);
+ if (pipe_ctx->update_flags.bits.enable)
+ hubp->funcs->set_blank(hubp, false);
+}
+
+
+static void dcn20_program_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ /* Only need to unblank on top pipe */
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level)
+ && !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
+ dc->hwss.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
+
+ if (pipe_ctx->update_flags.bits.global_sync) {
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+ pipe_ctx->pipe_dlg_param.vready_offset,
+ pipe_ctx->pipe_dlg_param.vstartup_start,
+ pipe_ctx->pipe_dlg_param.vupdate_offset,
+ pipe_ctx->pipe_dlg_param.vupdate_width);
- if (interdependent_update)
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+
+ if (dc->hwss.setup_vupdate_interrupt)
+ dc->hwss.setup_vupdate_interrupt(pipe_ctx);
+ }
+
+ if (pipe_ctx->update_flags.bits.odm)
+ dc->hwss.update_odm(dc, context, pipe_ctx);
+
+ if (pipe_ctx->update_flags.bits.enable)
+ dcn20_enable_plane(dc, pipe_ctx, context);
+
+ if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
+ dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
+
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->plane_state->update_flags.bits.sdr_white_level)
+ set_hdr_multiplier(pipe_ctx);
+
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change)
+ dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+
+ /* dcn10_translate_regamma_to_hw_format takes 750us to finish, so only
+ * do gamma programming when powering on; the internal memcmp avoids
+ * updating on slave planes.
+ */
+ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf)
+ dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+
+ /* If the pipe has been enabled or has a different opp, we
+ * should reprogram the fmt. This deals with cases where the
+ * interaction between mpc and odm combine on different streams
+ * causes a different pipe to be chosen for odm combine.
+ */
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->update_flags.bits.opp_changed) {
+
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ pipe_ctx->stream->timing.display_color_depth,
+ pipe_ctx->stream->signal);
+
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &pipe_ctx->stream->bit_depth_params,
+ &pipe_ctx->stream->clamping);
+ }
+}
+
+static bool does_pipe_need_lock(struct pipe_ctx *pipe)
+{
+ if ((pipe->plane_state && pipe->plane_state->update_flags.raw)
+ || pipe->update_flags.raw)
+ return true;
+ if (pipe->bottom_pipe)
+ return does_pipe_need_lock(pipe->bottom_pipe);
+
+ return false;
+}
+
+static void dcn20_program_front_end_for_ctx(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
+ int i;
+ bool pipe_locked[MAX_PIPES] = {false};
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ /* Carry over GSL groups in case the context is changing. */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].stream == dc->current_state->res_ctx.pipe_ctx[i].stream)
+ context->res_ctx.pipe_ctx[i].stream_res.gsl_group =
+ dc->current_state->res_ctx.pipe_ctx[i].stream_res.gsl_group;
+
+ /* Set pipe update flags and lock pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
+ &context->res_ctx.pipe_ctx[i]);
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (!context->res_ctx.pipe_ctx[i].top_pipe &&
+ does_pipe_need_lock(&context->res_ctx.pipe_ctx[i])) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
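+ /* Lock in whichever state owns the pipe's TG: the new context for
+ * pipes being enabled or moved to a different TG, and the current
+ * state for pipes that are not newly enabled.
+ */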
- /* Skip inactive pipes and ones already updated */
- if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
- !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
- continue;
+ if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
+ if (!pipe_ctx->update_flags.bits.enable)
+ dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], true);
+ pipe_locked[i] = true;
+ }
- pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
- pipe_ctx->plane_res.hubp,
- &pipe_ctx->dlg_regs,
- &pipe_ctx->ttu_regs);
+ /* OTG blank before disabling all front ends */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ && !context->res_ctx.pipe_ctx[i].top_pipe
+ && !context->res_ctx.pipe_ctx[i].prev_odm_pipe
+ && context->res_ctx.pipe_ctx[i].stream)
+ dc->hwss.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
+
+ /* Disconnect mpcc */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
+ dc->hwss.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
- if (interdependent_update)
- lock_all_pipes(dc, context, false);
- else
- dcn20_pipe_control_lock(dc, top_pipe_to_program, false);
+ /*
+ * Program all updated pipes, order matters for mpcc setup. Start with
+ * top pipe and program all pipes that follow in order
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ if (pipe->plane_state && !pipe->top_pipe) {
+ while (pipe) {
+ dcn20_program_pipe(dc, pipe, context);
+ pipe = pipe->bottom_pipe;
+ }
+ /* Program secondary blending tree and writeback pipes */
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0
+ && (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw)
+ && dc->hwss.program_all_writeback_pipes_in_tree)
+ dc->hwss.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
+ }
+ }
+
+ /* Unlock all locked pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (removed_pipe[i])
- dcn20_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ if (pipe_locked[i]) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
+ if (!pipe_ctx->update_flags.bits.enable)
+ dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], false);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
+ dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
/*
* If we are enabling a pipe, we need to wait for pending clear as this is a critical
@@ -1239,15 +1521,22 @@ static void dcn20_apply_ctx_for_surface(
* will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which
* is unsupported on DCN.
*/
- i = 0;
- if (num_planes > 0 && top_pipe_to_program &&
- (prev_top_pipe_to_program == NULL || prev_top_pipe_to_program->plane_state == NULL)) {
- while (i < TIMEOUT_FOR_PIPE_ENABLE_MS &&
- top_pipe_to_program->plane_res.hubp->funcs->hubp_is_flip_pending(top_pipe_to_program->plane_res.hubp)) {
- i += 1;
- msleep(1);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) {
+ struct hubp *hubp = pipe->plane_res.hubp;
+ int j = 0;
+
+ for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS
+ && hubp->funcs->hubp_is_flip_pending(hubp); j++)
+ msleep(1);
}
}
+
+ /* Workaround to apply the watermark settings */
+ if (dc->hwseq->wa.DEGVIDCN21)
+ dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
}
@@ -1319,8 +1608,12 @@ bool dcn20_update_bandwidth(
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+
if (pipe_ctx->prev_odm_pipe == NULL)
dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+
+ if (dc->hwss.setup_vupdate_interrupt)
+ dc->hwss.setup_vupdate_interrupt(pipe_ctx);
}
pipe_ctx->plane_res.hubp->funcs->hubp_setup(
@@ -1337,7 +1630,8 @@ bool dcn20_update_bandwidth(
static void dcn20_enable_writeback(
struct dc *dc,
const struct dc_stream_status *stream_status,
- struct dc_writeback_info *wb_info)
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context)
{
struct dwbc *dwb;
struct mcif_wb *mcif_wb;
@@ -1354,7 +1648,7 @@ static void dcn20_enable_writeback(
optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst);
/* set MCIF_WB buffer and arbitration configuration */
mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
- mcif_wb->funcs->config_mcif_arb(mcif_wb, &dc->current_state->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
+ mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
/* Enable MCIF_WB */
mcif_wb->funcs->enable_mcif(mcif_wb);
/* Enable DWB */
@@ -1919,8 +2213,10 @@ static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
pipe_ctx->stream_res.stream_enc->id, true);
- if (link->dc->hwss.program_dmdata_engine)
- link->dc->hwss.program_dmdata_engine(pipe_ctx);
+ if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
+ if (link->dc->hwss.program_dmdata_engine)
+ link->dc->hwss.program_dmdata_engine(pipe_ctx);
+ }
link->dc->hwss.update_info_frame(pipe_ctx);
@@ -2095,7 +2391,8 @@ void dcn20_hw_sequencer_construct(struct dc *dc)
dc->hwss.program_triplebuffer = dcn20_program_tripleBuffer;
dc->hwss.set_input_transfer_func = dcn20_set_input_transfer_func;
dc->hwss.set_output_transfer_func = dcn20_set_output_transfer_func;
- dc->hwss.apply_ctx_for_surface = dcn20_apply_ctx_for_surface;
+ dc->hwss.apply_ctx_for_surface = NULL;
+ dc->hwss.program_front_end_for_ctx = dcn20_program_front_end_for_ctx;
dc->hwss.pipe_control_lock = dcn20_pipe_control_lock;
dc->hwss.pipe_control_lock_global = dcn20_pipe_control_lock_global;
dc->hwss.optimize_bandwidth = dcn20_optimize_bandwidth;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 92ab3dd91814..9dbc2effa4ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -96,4 +96,18 @@ void dcn20_init_blank(
struct dc *dc,
struct timing_generator *tg);
void dcn20_display_init(struct dc *dc);
+void dcn20_pipe_control_lock(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
+void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_enable_plane(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+bool dcn20_set_blend_lut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
+bool dcn20_set_shaper_3dlut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
+
#endif /* __DC_HWSS_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index 3736b5548a25..0c98a0bbbd14 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -91,6 +91,13 @@ struct mpll_cfg {
uint32_t ref_range;
uint32_t ref_clk;
bool hdmimode_enable;
+ bool sup_pre_hp;
+ bool dp_tx0_vergdrv_byp;
+ bool dp_tx1_vergdrv_byp;
+ bool dp_tx2_vergdrv_byp;
+ bool dp_tx3_vergdrv_byp;
};
struct dpcssys_phy_seq_cfg {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 2137e2be2140..3b613fb93ef8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -287,6 +287,10 @@ void optc2_get_optc_source(struct timing_generator *optc,
*num_of_src_opp = 2;
else
*num_of_src_opp = 1;
+
+ /* Work around VBIOS not updating OPTC_NUM_OF_INPUT_SEGMENT */
+ if (*src_opp_id_1 == 0xf)
+ *num_of_src_opp = 1;
}
void optc2_set_dwb_source(struct timing_generator *optc,
@@ -456,7 +460,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.set_vtg_params = optc1_set_vtg_params,
.program_manual_trigger = optc2_program_manual_trigger,
.setup_manual_trigger = optc2_setup_manual_trigger,
- .is_matching_timing = optc1_is_matching_timing
+ .get_hw_timing = optc1_get_hw_timing,
};
void dcn20_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 5a2763daff4d..ee9157b673ab 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -581,11 +581,13 @@ static const struct dcn2_dpp_registers tf_regs[] = {
};
static const struct dcn2_dpp_shift tf_shift = {
- TF_REG_LIST_SH_MASK_DCN20(__SHIFT)
+ TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
+ TF_DEBUG_REG_LIST_SH_DCN10
};
static const struct dcn2_dpp_mask tf_mask = {
- TF_REG_LIST_SH_MASK_DCN20(_MASK)
+ TF_REG_LIST_SH_MASK_DCN20(_MASK),
+ TF_DEBUG_REG_LIST_MASK_DCN10
};
#define dwbc_regs_dcn2(id)\
@@ -732,6 +734,15 @@ static const struct dcn20_vmid_mask vmid_masks = {
DCN20_VMID_MASK_SH_LIST(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#define dsc_regsDCN20(id)\
[id] = {\
@@ -922,7 +933,10 @@ struct dce_aux *dcn20_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -1154,6 +1168,8 @@ static const struct resource_create_funcs res_create_maximus_funcs = {
.create_hwseq = dcn20_hwseq_create,
};
+static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
+
void dcn20_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
@@ -1760,7 +1776,7 @@ int dcn20_populate_dml_pipes_from_context(
pipe_cnt = i;
continue;
}
- if (!resource_are_streams_timing_synchronizable(
+ if (dc->debug.disable_timing_sync || !resource_are_streams_timing_synchronizable(
res_ctx->pipe_ctx[pipe_cnt].stream,
res_ctx->pipe_ctx[i].stream)) {
synchronized_vblank = false;
@@ -1892,7 +1908,7 @@ int dcn20_populate_dml_pipes_from_context(
break;
case PIXEL_ENCODING_YCBCR420:
pipes[pipe_cnt].dout.output_format = dm_420;
- pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3) / 2;
+ pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3.0) / 2;
break;
case PIXEL_ENCODING_YCBCR422:
if (true) /* todo */
@@ -1906,6 +1922,11 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
}
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
+ pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
+#endif
+
/* todo: default max for now, until there is logic reflecting this in dc*/
pipes[pipe_cnt].dout.output_bpc = 12;
/*
@@ -2202,7 +2223,8 @@ static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
*/
if (secondary_pipe == NULL) {
for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
- if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL) {
+ if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL
+ && dc->current_state->res_ctx.pipe_ctx[j].prev_odm_pipe == NULL) {
preferred_pipe_idx = j;
if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
@@ -2249,11 +2271,7 @@ bool dcn20_fast_validate_bw(
bool out = false;
int pipe_cnt, i, pipe_idx, vlevel, vlevel_unsplit;
- bool odm_capable = context->bw_ctx.dml.ip.odm_capable;
bool force_split = false;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- bool failed_non_odm_dsc = false;
-#endif
int split_threshold = dc->res_pool->pipe_count / 2;
bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
@@ -2330,24 +2348,8 @@ bool dcn20_fast_validate_bw(
goto validate_out;
}
- context->bw_ctx.dml.ip.odm_capable = 0;
-
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
- context->bw_ctx.dml.ip.odm_capable = odm_capable;
-
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- /* 1 dsc per stream dsc validation */
- if (vlevel <= context->bw_ctx.dml.soc.num_states)
- if (!dcn20_validate_dsc(dc, context)) {
- failed_non_odm_dsc = true;
- vlevel = context->bw_ctx.dml.soc.num_states + 1;
- }
-#endif
-
- if (vlevel > context->bw_ctx.dml.soc.num_states && odm_capable)
- vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
-
if (vlevel > context->bw_ctx.dml.soc.num_states)
goto validate_fail;
@@ -2469,6 +2471,7 @@ bool dcn20_fast_validate_bw(
&context->res_ctx, dc->res_pool,
pipe, hsplit_pipe))
goto validate_fail;
+ dcn20_build_mapped_resource(dc, context, pipe->stream);
} else
dcn20_split_stream_for_mpc(
&context->res_ctx, dc->res_pool,
@@ -2482,7 +2485,7 @@ bool dcn20_fast_validate_bw(
}
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* Actual dsc count per stream dsc validation*/
- if (failed_non_odm_dsc && !dcn20_validate_dsc(dc, context)) {
+ if (!dcn20_validate_dsc(dc, context)) {
context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
DML_FAIL_DSC_VALIDATION_FAILURE;
goto validate_fail;
@@ -2574,6 +2577,10 @@ void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#endif
if (vlevel < 2) {
pipes[0].clks_cfg.voltage = 2;
@@ -2585,6 +2592,10 @@ void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#endif
if (vlevel < 3) {
pipes[0].clks_cfg.voltage = 3;
@@ -2596,6 +2607,10 @@ void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#endif
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
@@ -2605,6 +2620,10 @@ void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#endif
}
void dcn20_calculate_dlg_params(
@@ -2624,7 +2643,7 @@ void dcn20_calculate_dlg_params(
context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
- context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
context->bw_ctx.bw.dcn.clk.p_state_change_support =
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
!= dm_dram_clock_change_unsupported;
@@ -2640,8 +2659,8 @@ void dcn20_calculate_dlg_params(
continue;
if (!visited[pipe_idx]) {
- display_pipe_source_params_st *src = &pipes[pipe_idx_unsplit].pipe.src;
- display_pipe_dest_params_st *dst = &pipes[pipe_idx_unsplit].pipe.dest;
+ display_pipe_source_params_st *src = &pipes[pipe_idx].pipe.src;
+ display_pipe_dest_params_st *dst = &pipes[pipe_idx].pipe.dest;
dst->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit];
dst->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit];
@@ -2801,7 +2820,6 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
ASSERT(false);
restore_dml_state:
- memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
return voltage_supported;
@@ -2942,7 +2960,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
return true;
}
-struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
+static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
{
struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
@@ -2957,7 +2975,7 @@ struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
return pp_smu;
}
-void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
+static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
{
if (pp_smu && *pp_smu) {
kfree(*pp_smu);
@@ -3035,7 +3053,7 @@ static void cap_soc_clocks(
static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states)
{
- struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES] = {0};
+ struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES];
int i;
int num_calculated_states = 0;
int min_dcfclk = 0;
@@ -3043,6 +3061,8 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_
if (num_states == 0)
return;
+ memset(calculated_states, 0, sizeof(calculated_states));
+
if (dc->bb_overrides.min_dcfclk_mhz > 0)
min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
else
@@ -3340,6 +3360,7 @@ static bool construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.hw_3d_lut = true;
+ dc->caps.extended_aux_timeout_support = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index 44f95aa0d61e..55006462f481 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -95,9 +95,6 @@ struct display_stream_compressor *dcn20_dsc_create(
struct dc_context *ctx, uint32_t inst);
void dcn20_dsc_destroy(struct display_stream_compressor **dsc);
-struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx);
-void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
-
struct hubp *dcn20_hubp_create(
struct dc_context *ctx,
uint32_t inst);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index 5ab9d6240498..4b3401616434 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -578,6 +578,10 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
.set_avmute = enc1_stream_encoder_set_avmute,
.dig_connect_to_otg = enc1_dig_connect_to_otg,
.dig_source_otg = enc1_dig_source_otg,
+
+ .dp_get_pixel_format =
+ enc1_stream_encoder_dp_get_pixel_format,
+
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.enc_read_state = enc2_read_state,
.dp_set_dsc_config = enc2_dp_set_dsc_config,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index ef673bffc241..19fabac13c65 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -1,7 +1,7 @@
#
# Makefile for DCN21.
-DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o
+DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o dcn21_hwseq.o dcn21_link_encoder.o
ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
cc_stack_align := -mpreferred-stack-boundary=4
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index d1266741763b..f546260c15b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -22,6 +22,7 @@
* Authors: AMD
*
*/
+#include <linux/delay.h>
#include "dm_services.h"
#include "dcn20/dcn20_hubbub.h"
#include "dcn21_hubbub.h"
@@ -51,7 +52,7 @@
#ifdef NUM_VMID
#undef NUM_VMID
#endif
-#define NUM_VMID 1
+#define NUM_VMID 16
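+/* DCN2.1 reports all 16 VMIDs (matching dcn20_hubbub's vmid[16] array);
+ * only VMID 0 is initialized from the PA config in hubbub21_init_dchub().
+ */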
static uint32_t convert_and_clamp(
uint32_t wm_ns,
@@ -71,56 +72,76 @@ static uint32_t convert_and_clamp(
void dcn21_dchvm_init(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t riommu_active;
+ int i;
//Init DCHVM block
REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
//Poll until RIOMMU_ACTIVE = 1
- //TODO: Figure out interval us and retry count
- REG_WAIT(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, 1, 5, 100);
+ for (i = 0; i < 100; i++) {
+ REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);
- //Reflect the power status of DCHUBBUB
- REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);
+ if (riommu_active)
+ break;
+ else
+ udelay(5);
+ }
+
+ if (riommu_active) {
+ //Reflect the power status of DCHUBBUB
+ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);
- //Start rIOMMU prefetching
- REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);
+ //Start rIOMMU prefetching
+ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);
- // Enable dynamic clock gating
- REG_UPDATE_4(DCHVM_CLK_CTRL,
- HVM_DISPCLK_R_GATE_DIS, 0,
- HVM_DISPCLK_G_GATE_DIS, 0,
- HVM_DCFCLK_R_GATE_DIS, 0,
- HVM_DCFCLK_G_GATE_DIS, 0);
+ // Enable dynamic clock gating
+ REG_UPDATE_4(DCHVM_CLK_CTRL,
+ HVM_DISPCLK_R_GATE_DIS, 0,
+ HVM_DISPCLK_G_GATE_DIS, 0,
+ HVM_DCFCLK_R_GATE_DIS, 0,
+ HVM_DCFCLK_G_GATE_DIS, 0);
- //Poll until HOSTVM_PREFETCH_DONE = 1
- //TODO: Figure out interval us and retry count
- REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
+ //Poll until HOSTVM_PREFETCH_DONE = 1
+ REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
+ }
}
-static int hubbub21_init_dchub(struct hubbub *hubbub,
+int hubbub21_init_dchub(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ struct dcn_vmid_page_table_config phys_config;
REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
- FB_BASE, pa_config->system_aperture.fb_base);
+ FB_BASE, pa_config->system_aperture.fb_base >> 24);
REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
- FB_TOP, pa_config->system_aperture.fb_top);
+ FB_TOP, pa_config->system_aperture.fb_top >> 24);
REG_SET(DCN_VM_FB_OFFSET, 0,
- FB_OFFSET, pa_config->system_aperture.fb_offset);
+ FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
REG_SET(DCN_VM_AGP_BOT, 0,
- AGP_BOT, pa_config->system_aperture.agp_bot);
+ AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
REG_SET(DCN_VM_AGP_TOP, 0,
- AGP_TOP, pa_config->system_aperture.agp_top);
+ AGP_TOP, pa_config->system_aperture.agp_top >> 24);
REG_SET(DCN_VM_AGP_BASE, 0,
- AGP_BASE, pa_config->system_aperture.agp_base);
+ AGP_BASE, pa_config->system_aperture.agp_base >> 24);
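+ /* The aperture registers appear to take addresses in units of 2^24
+ * bytes (16 MB), hence the >> 24 shifts above; the page table
+ * addresses below use 4 KB units (>> 12). Inferred from the shifts,
+ * not from register documentation.
+ */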
+
+ if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
+ phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
+ phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
+ phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; //Note: hack
+ phys_config.depth = 0;
+ phys_config.block_size = 0;
+ // Init VMID 0 based on PA config
+ dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
+ }
dcn21_dchvm_init(hubbub);
return NUM_VMID;
}
-static void hubbub21_program_urgent_watermarks(
+void hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -160,6 +181,13 @@ static void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
}
+ if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) {
+ hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
+ }
/* clock state B */
if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
@@ -192,6 +220,14 @@ static void hubbub21_program_urgent_watermarks(
DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom);
}
+ if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) {
+ hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
+ }
+
/* clock state C */
if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
@@ -223,6 +259,14 @@ static void hubbub21_program_urgent_watermarks(
DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom);
}
+ if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) {
+ hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
+ }
+
/* clock state D */
if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
@@ -253,9 +297,17 @@ static void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom);
}
+
+ if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) {
+ hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
+ }
}
-static void hubbub21_program_stutter_watermarks(
+void hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -389,7 +441,7 @@ static void hubbub21_program_stutter_watermarks(
}
}
-static void hubbub21_program_pstate_watermarks(
+void hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -564,17 +616,26 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage);
}
+void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t prog_wm_value;
+
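+ /* Read the urgency watermark back and rewrite it unchanged; the
+ * register write itself re-latches the value, which is presumably
+ * what the DEDCN21-147 workaround requires.
+ */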
+ prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
+}
static const struct hubbub_funcs hubbub21_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub21_init_dchub,
- .init_vm_ctx = NULL,
+ .init_vm_ctx = hubbub2_init_vm_ctx,
.dcc_support_swizzle = hubbub2_dcc_support_swizzle,
.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
.get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
.wm_read_state = hubbub21_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
.program_watermarks = hubbub21_program_watermarks,
+ .apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa,
};
void hubbub21_construct(struct dcn20_hubbub *hubbub,
@@ -592,4 +653,5 @@ void hubbub21_construct(struct dcn20_hubbub *hubbub,
hubbub->masks = hubbub_mask;
hubbub->debug_test_index_pstate = 0xB;
+ hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
index 6ff3cdb89178..c4840dfb1fa5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
@@ -36,6 +36,10 @@
SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\
SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\
SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D),\
SR(DCHUBBUB_ARB_HOSTVM_CNTL), \
SR(DCHVM_CTRL0), \
SR(DCHVM_MEM_CTRL), \
@@ -44,16 +48,9 @@
SR(DCHVM_RIOMMU_STAT0)
#define HUBBUB_REG_LIST_DCN21()\
- HUBBUB_REG_LIST_DCN_COMMON(), \
+ HUBBUB_REG_LIST_DCN20_COMMON(), \
HUBBUB_SR_WATERMARK_REG_LIST(), \
- HUBBUB_HVM_REG_LIST(), \
- SR(DCHUBBUB_CRC_CTRL), \
- SR(DCN_VM_FB_LOCATION_BASE),\
- SR(DCN_VM_FB_LOCATION_TOP),\
- SR(DCN_VM_FB_OFFSET),\
- SR(DCN_VM_AGP_BOT),\
- SR(DCN_VM_AGP_TOP),\
- SR(DCN_VM_AGP_BASE)
+ HUBBUB_HVM_REG_LIST()
#define HUBBUB_MASK_SH_LIST_HVM(mask_sh) \
HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, mask_sh), \
@@ -102,7 +99,7 @@
HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh)
#define HUBBUB_MASK_SH_LIST_DCN21(mask_sh)\
- HUBBUB_MASK_SH_LIST_HVM(mask_sh),\
+ HUBBUB_MASK_SH_LIST_HVM(mask_sh), \
HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \
HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \
HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
@@ -114,11 +111,28 @@
HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh)
void dcn21_dchvm_init(struct hubbub *hubbub);
+int hubbub21_init_dchub(struct hubbub *hubbub,
+ struct dcn_hubbub_phys_addr_config *pa_config);
void hubbub21_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
+void hubbub21_program_urgent_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+void hubbub21_program_stutter_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+void hubbub21_program_pstate_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
void hubbub21_wm_read_state(struct hubbub *hubbub,
struct dcn_hubbub_wm *wm);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index a00af513aa2b..2f5a5867e674 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -22,6 +22,8 @@
* Authors: AMD
*
*/
+
+#include "dcn10/dcn10_hubp.h"
#include "dcn21_hubp.h"
#include "dm_services.h"
@@ -202,7 +204,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
.hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr,
- .hubp_program_surface_config = hubp2_program_surface_config,
+ .hubp_program_surface_config = hubp1_program_surface_config,
.hubp_is_flip_pending = hubp1_is_flip_pending,
.hubp_setup = hubp21_setup,
.hubp_setup_interdependent = hubp2_setup_interdependent,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
new file mode 100644
index 000000000000..b25215cadf85
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "core_types.h"
+#include "resource.h"
+#include "dce/dce_hwseq.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "vmid.h"
+#include "reg_helper.h"
+#include "hw/clk_mgr.h"
+
+
+#define DC_LOGGER_INIT(logger)
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
+/* Temporarily read the settings here; in the future the values will come from KMD directly */
+static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *config,
+ struct dce_hwseq *hws)
+{
+ uint32_t page_table_base_hi;
+ uint32_t page_table_base_lo;
+
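+ /* Read back the page-directory base currently programmed in VM context 0
+ * and combine the hi/lo halves into a 64-bit address. */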
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ PAGE_DIRECTORY_ENTRY_HI32, &page_table_base_hi);
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ PAGE_DIRECTORY_ENTRY_LO32, &page_table_base_lo);
+
+ config->gart_config.page_table_base_addr = ((uint64_t)page_table_base_hi << 32) | page_table_base_lo;
+
+}
+
+static int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
+{
+ struct dcn_hubbub_phys_addr_config config;
+
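+ /* Copy the DM-provided aperture and GART layout into the HUBBUB config
+ * before reading the live page-table base. */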
+ config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
+ config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset;
+ config.system_aperture.fb_base = pa_config->system_aperture.fb_base;
+ config.system_aperture.agp_top = pa_config->system_aperture.agp_top;
+ config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot;
+ config.system_aperture.agp_base = pa_config->system_aperture.agp_base;
+ config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr;
+ config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr;
+ config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
+
+ mmhub_update_page_table_config(&config, hws);
+
+ return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config);
+}
+
+// Workaround for Renoir s0i3: if the register is already programmed, bypass golden init.
+
+static bool dcn21_s0i3_golden_init_wa(struct dc *dc)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ uint32_t value = 0;
+
+ value = REG_READ(MICROSECOND_TIME_BASE_DIV);
+
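+ /* Any value other than 0x00120464 means the register has already been
+ * programmed, so golden init can be bypassed. */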
+ return value != 0x00120464;
+}
+
+void dcn21_exit_optimized_pwr_state(
+ const struct dc *dc,
+ struct dc_state *context)
+{
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
+ context,
+ false);
+}
+
+void dcn21_optimize_pwr_state(
+ const struct dc *dc,
+ struct dc_state *context)
+{
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
+ context,
+ true);
+}
+
+void dcn21_hw_sequencer_construct(struct dc *dc)
+{
+ dcn20_hw_sequencer_construct(dc);
+ dc->hwss.init_sys_ctx = dcn21_init_sys_ctx;
+ dc->hwss.s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa;
+ dc->hwss.optimize_pwr_state = dcn21_optimize_pwr_state;
+ dc->hwss.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
new file mode 100644
index 000000000000..be67b62e6fb1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
@@ -0,0 +1,33 @@
+/*
+* Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCN21_H__
+#define __DC_HWSS_DCN21_H__
+
+struct dc;
+
+void dcn21_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_HWSS_DCN21_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
new file mode 100644
index 000000000000..e8a504ca5890
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+
+#include <linux/delay.h>
+#include "core_types.h"
+#include "link_encoder.h"
+#include "dcn21_link_encoder.h"
+#include "stream_encoder.h"
+
+#include "i2caux_interface.h"
+#include "dc_bios_types.h"
+
+#include "gpio_service_interface.h"
+
+#define CTX \
+ enc10->base.ctx
+#define DC_LOGGER \
+ enc10->base.ctx->logger
+
+#define REG(reg)\
+ (enc10->link_regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc10->link_shift->field_name, enc10->link_mask->field_name
+
+#define IND_REG(index) \
+ (enc10->link_regs->index)
+
+static struct mpll_cfg dcn21_mpll_cfg_ref[] = {
+ // RBR
+ {
+ .hdmimode_enable = 0,
+ .ref_range = 1,
+ .ref_clk_mpllb_div = 1,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 238,
+ .mpllb_fracn_en = 0,
+ .mpllb_fracn_quot = 0,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 44237,
+ .mpllb_ssc_stepsize = 59454,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 2,
+ .tx_vboost_lvl = 5,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 2,
+ .mpllb_ana_cp_int = 9,
+ .mpllb_ana_cp_prop = 15,
+ .hdmi_pixel_clk_div = 0,
+ },
+ // HBR
+ {
+ .hdmimode_enable = 0,
+ .ref_range = 1,
+ .ref_clk_mpllb_div = 1,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 192,
+ .mpllb_fracn_en = 1,
+ .mpllb_fracn_quot = 32768,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 36864,
+ .mpllb_ssc_stepsize = 49545,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 1,
+ .tx_vboost_lvl = 5,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 3,
+ .mpllb_ana_cp_int = 9,
+ .mpllb_ana_cp_prop = 15,
+ .hdmi_pixel_clk_div = 0,
+ },
+ //HBR2
+ {
+ .hdmimode_enable = 0,
+ .ref_range = 1,
+ .ref_clk_mpllb_div = 1,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 192,
+ .mpllb_fracn_en = 1,
+ .mpllb_fracn_quot = 32768,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 36864,
+ .mpllb_ssc_stepsize = 49545,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 0,
+ .tx_vboost_lvl = 5,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 3,
+ .mpllb_ana_cp_int = 9,
+ .mpllb_ana_cp_prop = 15,
+ .hdmi_pixel_clk_div = 0,
+ },
+ //HBR3
+ {
+ .hdmimode_enable = 0,
+ .ref_range = 1,
+ .ref_clk_mpllb_div = 1,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 304,
+ .mpllb_fracn_en = 1,
+ .mpllb_fracn_quot = 49152,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 55296,
+ .mpllb_ssc_stepsize = 74318,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 0,
+ .tx_vboost_lvl = 5,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 1,
+ .mpllb_ana_cp_int = 7,
+ .mpllb_ana_cp_prop = 16,
+ .hdmi_pixel_clk_div = 0,
+ },
+};
+
+
+static bool update_cfg_data(
+ struct dcn10_link_encoder *enc10,
+ const struct dc_link_settings *link_settings,
+ struct dpcssys_phy_seq_cfg *cfg)
+{
+ int i;
+
+ cfg->load_sram_fw = false;
+ cfg->use_calibration_setting = true;
+
+ //TODO: need to implement a proper lane mapping for Renoir.
+ for (i = 0; i < 4; i++)
+ cfg->lane_en[i] = true;
+
+ switch (link_settings->link_rate) {
+ case LINK_RATE_LOW:
+ cfg->mpll_cfg = dcn21_mpll_cfg_ref[0];
+ break;
+ case LINK_RATE_HIGH:
+ cfg->mpll_cfg = dcn21_mpll_cfg_ref[1];
+ break;
+ case LINK_RATE_HIGH2:
+ cfg->mpll_cfg = dcn21_mpll_cfg_ref[2];
+ break;
+ case LINK_RATE_HIGH3:
+ cfg->mpll_cfg = dcn21_mpll_cfg_ref[3];
+ break;
+ default:
+ DC_LOG_ERROR("%s: No supported link rate found %X!\n",
+ __func__, link_settings->link_rate);
+ return false;
+ }
+
+ return true;
+}
+
+void dcn21_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t value;
+
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &value);
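+ /* If DPALT_DP4 is not set, the USB-C alt-mode pin assignment carries
+ * only two DP lanes, so cap the reported lane count. */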
+
+ if (!value && link_settings->lane_count > LANE_COUNT_TWO)
+ link_settings->lane_count = LANE_COUNT_TWO;
+}
+
+bool dcn21_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t value;
+
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &value);
+
+ // if value == 1, alt mode is disabled; otherwise it is enabled
+ return !value;
+}
+
+bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ int value;
+
+ if (enc->features.flags.bits.DP_IS_USB_C) {
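+ /* For USB-C, DP alt mode must still be active before taking the PHY:
+ * check DPALT_DISABLE, drop the disable ACK, wait, then re-check. */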
+ REG_GET(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE, &value);
+
+ if (value == 1) {
+ ASSERT(0);
+ return false;
+ }
+ REG_UPDATE(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE_ACK, 0);
+
+ udelay(40);
+
+ REG_GET(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE, &value);
+ if (value == 1) {
+ ASSERT(0);
+ REG_UPDATE(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE_ACK, 1);
+ return false;
+ }
+ }
+
+ REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 1);
+
+ return true;
+}
+
+
+
+static void dcn21_link_encoder_release_phy(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ if (enc->features.flags.bits.DP_IS_USB_C) {
+ REG_UPDATE(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE_ACK, 1);
+ }
+
+ REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 0);
+
+}
+
+void dcn21_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ struct dcn21_link_encoder *enc21 = (struct dcn21_link_encoder *) enc10;
+ struct dpcssys_phy_seq_cfg *cfg = &enc21->phy_seq_cfg;
+
+ if (!dcn21_link_encoder_acquire_phy(enc))
+ return;
+
+ if (!enc->ctx->dc->debug.avoid_vbios_exec_table) {
+ dcn10_link_encoder_enable_dp_output(enc, link_settings, clock_source);
+ return;
+ }
+
+ if (!update_cfg_data(enc10, link_settings, cfg))
+ return;
+
+ enc1_configure_encoder(enc10, link_settings);
+
+ dcn10_link_encoder_setup(enc, SIGNAL_TYPE_DISPLAY_PORT);
+
+}
+
+void dcn21_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ if (!dcn21_link_encoder_acquire_phy(enc))
+ return;
+
+ dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source);
+}
+
+void dcn21_link_encoder_disable_output(
+ struct link_encoder *enc,
+ enum signal_type signal)
+{
+ dcn10_link_encoder_disable_output(enc, signal);
+
+ if (dc_is_dp_signal(signal))
+ dcn21_link_encoder_release_phy(enc);
+}
+
+
+static const struct link_encoder_funcs dcn21_link_enc_funcs = {
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ .read_state = link_enc2_read_state,
+#endif
+ .validate_output_with_stream =
+ dcn10_link_encoder_validate_output_with_stream,
+ .hw_init = enc2_hw_init,
+ .setup = dcn10_link_encoder_setup,
+ .enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
+ .enable_dp_output = dcn21_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dcn21_link_encoder_enable_dp_mst_output,
+ .disable_output = dcn21_link_encoder_disable_output,
+ .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
+ .update_mst_stream_allocation_table =
+ dcn10_link_encoder_update_mst_stream_allocation_table,
+ .psr_program_dp_dphy_fast_training =
+ dcn10_psr_program_dp_dphy_fast_training,
+ .psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
+ .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
+ .enable_hpd = dcn10_link_encoder_enable_hpd,
+ .disable_hpd = dcn10_link_encoder_disable_hpd,
+ .is_dig_enabled = dcn10_is_dig_enabled,
+ .destroy = dcn10_link_encoder_destroy,
+ .fec_set_enable = enc2_fec_set_enable,
+ .fec_set_ready = enc2_fec_set_ready,
+ .fec_is_active = enc2_fec_is_active,
+ .get_dig_frontend = dcn10_get_dig_frontend,
+ .is_in_alt_mode = dcn21_link_encoder_is_in_alt_mode,
+ .get_max_link_cap = dcn21_link_encoder_get_max_link_cap,
+};
+
+void dcn21_link_encoder_construct(
+ struct dcn21_link_encoder *enc21,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask)
+{
+ struct bp_encoder_cap_info bp_cap_info = {0};
+ const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+ enum bp_result result = BP_RESULT_OK;
+ struct dcn10_link_encoder *enc10 = &enc21->enc10;
+
+ enc10->base.funcs = &dcn21_link_enc_funcs;
+ enc10->base.ctx = init_data->ctx;
+ enc10->base.id = init_data->encoder;
+
+ enc10->base.hpd_source = init_data->hpd_source;
+ enc10->base.connector = init_data->connector;
+
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+ enc10->base.features = *enc_features;
+
+ enc10->base.transmitter = init_data->transmitter;
+
+ /* set the flag to indicate whether the driver polls the I2C data pin
+ * while doing DP sink detection
+ */
+
+/* if (dal_adapter_service_is_feature_supported(as,
+ FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
+ enc10->base.features.flags.bits.
+ DP_SINK_DETECT_POLL_DATA_PIN = true;*/
+
+ enc10->base.output_signals =
+ SIGNAL_TYPE_DVI_SINGLE_LINK |
+ SIGNAL_TYPE_DVI_DUAL_LINK |
+ SIGNAL_TYPE_LVDS |
+ SIGNAL_TYPE_DISPLAY_PORT |
+ SIGNAL_TYPE_DISPLAY_PORT_MST |
+ SIGNAL_TYPE_EDP |
+ SIGNAL_TYPE_HDMI_TYPE_A;
+
+ /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
+ * SW always assigns DIG_FE 1:1 mapped to DIG_BE for non-MST UNIPHY.
+ * SW assigns DIG_FE to non-MST UNIPHY first and MST last, so the
+ * preferred DIG is per UNIPHY and is used by SST DP, eDP, HDMI, DVI
+ * and LVDS. The preferred DIG assignment is decided by board design.
+ * DCE 8.0 has at most 6 UNIPHYs; we assume board design and VBIOS
+ * will filter out the 7th UNIPHY for DCE 8.0, so adding DIGG should
+ * not hurt DCE 8.0. This lets DCE 8.1 share as much as possible
+ * with DCE 8.0.
+ */
+
+ enc10->link_regs = link_regs;
+ enc10->aux_regs = aux_regs;
+ enc10->hpd_regs = hpd_regs;
+ enc10->link_shift = link_shift;
+ enc10->link_mask = link_mask;
+
+ switch (enc10->base.transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ enc10->base.preferred_engine = ENGINE_ID_DIGA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ enc10->base.preferred_engine = ENGINE_ID_DIGB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ enc10->base.preferred_engine = ENGINE_ID_DIGC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ enc10->base.preferred_engine = ENGINE_ID_DIGD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ enc10->base.preferred_engine = ENGINE_ID_DIGE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ enc10->base.preferred_engine = ENGINE_ID_DIGF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ enc10->base.preferred_engine = ENGINE_ID_DIGG;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ }
+
+ /* default to one to mirror Windows behavior */
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
+
+ result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios,
+ enc10->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+ if (result == BP_RESULT_OK) {
+ enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ enc10->base.features.flags.bits.DP_IS_USB_C =
+ bp_cap_info.DP_IS_USB_C;
+ } else {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+ }
+ if (enc10->base.ctx->dc->debug.hdmi20_disable) {
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h
new file mode 100644
index 000000000000..1d7a1a51f13d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_ENCODER__DCN21_H__
+#define __DC_LINK_ENCODER__DCN21_H__
+
+#include "dcn20/dcn20_link_encoder.h"
+
+struct dcn21_link_encoder {
+ struct dcn10_link_encoder enc10;
+ struct dpcssys_phy_seq_cfg phy_seq_cfg;
+};
+
+#define LINK_ENCODER_MASK_SH_LIST_DCN21(mask_sh)\
+ LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL1_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL2_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL3_XBAR_SOURCE, mask_sh), \
+ SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
+ SR(RDPCSTX0_RDPCSTX_SCRATCH)
+
+void dcn21_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
+void dcn21_link_encoder_construct(
+ struct dcn21_link_encoder *enc21,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index de182185fe1f..bd16a8bfc951 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -23,8 +23,6 @@
*
*/
-#include <linux/slab.h>
-
#include "dm_services.h"
#include "dc.h"
@@ -42,11 +40,11 @@
#include "irq/dcn21/irq_service_dcn21.h"
#include "dcn20/dcn20_dpp.h"
#include "dcn20/dcn20_optc.h"
-#include "dcn20/dcn20_hwseq.h"
+#include "dcn21/dcn21_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn20/dcn20_opp.h"
#include "dcn20/dcn20_dsc.h"
-#include "dcn20/dcn20_link_encoder.h"
+#include "dcn21/dcn21_link_encoder.h"
#include "dcn20/dcn20_stream_encoder.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
@@ -84,6 +82,7 @@
struct _vcs_dpi_ip_params_st dcn2_1_ip = {
+ .odm_capable = 1,
.gpuvm_enable = 0,
.hostvm_enable = 0,
.gpuvm_max_page_table_levels = 1,
@@ -205,11 +204,11 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.state = 4,
.dcfclk_mhz = 810.0,
.fabricclk_mhz = 1600.0,
- .dispclk_mhz = 1015.0,
- .dppclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
+ .dispclk_mhz = 1395.0,
+ .dppclk_mhz = 1285.0,
+ .phyclk_mhz = 1325.0,
.socclk_mhz = 953.0,
- .dscclk_mhz = 318.334,
+ .dscclk_mhz = 489.0,
.dram_speed_mts = 4266.0,
},
/*Extra state, no dispclk ramping*/
@@ -217,18 +216,18 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.state = 5,
.dcfclk_mhz = 810.0,
.fabricclk_mhz = 1600.0,
- .dispclk_mhz = 1015.0,
- .dppclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
+ .dispclk_mhz = 1395.0,
+ .dppclk_mhz = 1285.0,
+ .phyclk_mhz = 1325.0,
.socclk_mhz = 953.0,
- .dscclk_mhz = 318.334,
+ .dscclk_mhz = 489.0,
.dram_speed_mts = 4266.0,
},
},
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
+ .sr_exit_time_us = 12.5,
+ .sr_enter_plus_exit_time_us = 17.0,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -350,6 +349,30 @@ static const struct bios_registers bios_regs = {
NBIO_SR(BIOS_SCRATCH_6)
};
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCN10_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCN10(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCN20_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCN20(_MASK)
+};
+
#ifdef CONFIG_DRM_AMD_DC_DMUB
static const struct dcn21_dmcub_registers dmcub_regs = {
DMCUB_REG_LIST_DCN()
@@ -628,6 +651,14 @@ static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
stream_enc_regs(4),
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
static const struct dcn10_stream_encoder_shift se_shift = {
SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT)
};
@@ -636,6 +667,8 @@ static const struct dcn10_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCN20(_MASK)
};
+static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
+
static struct input_pixel_processor *dcn21_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
@@ -683,7 +716,10 @@ static struct dce_aux *dcn21_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -726,11 +762,12 @@ static const struct resource_caps res_cap_rn = {
.num_timing_generator = 4,
.num_opp = 4,
.num_video_plane = 4,
- .num_audio = 6, // 6 audio endpoints. 4 audio streams
+ .num_audio = 4, // 4 audio endpoints. 4 audio streams
.num_stream_encoder = 5,
.num_pll = 5, // maybe 3 because the last two used for USB-c
.num_dwb = 1,
.num_ddc = 5,
+ .num_vmid = 1,
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.num_dsc = 3,
#endif
@@ -800,11 +837,11 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
- .max_downscale_src_width = 5120,/*upto 5K*/
+ .max_downscale_src_width = 3840,
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = true,
- .disable_48mhz_pwrdwn = true,
+ .disable_48mhz_pwrdwn = false,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -939,7 +976,7 @@ static void destruct(struct dcn21_resource_pool *pool)
dcn_dccg_destroy(&pool->base.dccg);
if (pool->base.pp_smu != NULL)
- dcn20_pp_smu_destroy(&pool->base.pp_smu);
+ dcn21_pp_smu_destroy(&pool->base.pp_smu);
}
@@ -974,6 +1011,29 @@ static void calculate_wm_set_for_vlevel(
}
+static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
+{
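+ /* bb_overrides are stored in ns while the bounding box uses us; the
+ * floating-point conversion below must run between kernel_fpu_begin/end. */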
+ kernel_fpu_begin();
+ if (dc->bb_overrides.sr_exit_time_ns) {
+ bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
+ }
+
+ if (dc->bb_overrides.sr_enter_plus_exit_time_ns) {
+ bb->sr_enter_plus_exit_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+ }
+
+ if (dc->bb_overrides.urgent_latency_ns) {
+ bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
+ }
+
+ if (dc->bb_overrides.dram_clock_change_latency_ns) {
+ bb->dram_clock_change_latency_us =
+ dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+ }
+ kernel_fpu_end();
+}
+
void dcn21_calculate_wm(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -988,6 +1048,8 @@ void dcn21_calculate_wm(
ASSERT(bw_params);
+ patch_bounding_box(dc, &context->bw_ctx.dml.soc);
+
for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
@@ -1278,7 +1340,6 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
dcn2_1_soc.num_chans = bw_params->num_channels;
- dcn2_1_soc.num_states = 0;
for (i = 0; i < clk_table->num_entries; i++) {
@@ -1288,8 +1349,9 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
/* This is probably wrong, TODO: find correct calculation */
dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 16 / 1000;
- dcn2_1_soc.num_states++;
}
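+ /* Duplicate the highest clock level into one extra state, used for
+ * transitions with no dispclk ramping. */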
+ dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1];
+ dcn2_1_soc.num_states = i;
}
/* Temporary Place holder until we can get them from fuse */
@@ -1317,32 +1379,42 @@ static struct dpm_clocks dummy_clocks = {
};
-enum pp_smu_status dummy_set_wm_ranges(struct pp_smu *pp,
+static enum pp_smu_status dummy_set_wm_ranges(struct pp_smu *pp,
struct pp_smu_wm_range_sets *ranges)
{
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status dummy_get_dpm_clock_table(struct pp_smu *pp,
+static enum pp_smu_status dummy_get_dpm_clock_table(struct pp_smu *pp,
struct dpm_clocks *clock_table)
{
*clock_table = dummy_clocks;
return PP_SMU_RESULT_OK;
}
-struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx)
+static struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx)
{
struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
- pp_smu->ctx.ver = PP_SMU_VER_RN;
+ if (!pp_smu)
+ return pp_smu;
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment) || IS_DIAG_DC(ctx->dce_environment)) {
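+ /* Diag and FPGA environments have no SMU to talk to, so install
+ * dummy RN functions instead. */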
+ pp_smu->ctx.ver = PP_SMU_VER_RN;
+ pp_smu->rn_funcs.get_dpm_clock_table = dummy_get_dpm_clock_table;
+ pp_smu->rn_funcs.set_wm_ranges = dummy_set_wm_ranges;
+ } else {
- pp_smu->rn_funcs.get_dpm_clock_table = dummy_get_dpm_clock_table;
- pp_smu->rn_funcs.set_wm_ranges = dummy_set_wm_ranges;
+ dm_pp_get_funcs(ctx, pp_smu);
+
+ if (pp_smu->ctx.ver != PP_SMU_VER_RN)
+ pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs));
+ }
return pp_smu;
}
-void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
+static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
{
if (pp_smu && *pp_smu) {
kfree(*pp_smu);
@@ -1400,6 +1472,7 @@ static struct dce_hwseq *dcn21_hwseq_create(
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
+ hws->wa.DEGVIDCN21 = true;
}
return hws;
}
@@ -1418,9 +1491,103 @@ static const struct resource_create_funcs res_create_maximus_funcs = {
.create_hwseq = dcn21_hwseq_create,
};
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .hdmi_ycbcr420_supported = true,
+ .dp_ycbcr420_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true
+};
+
+
+#define link_regs(id, phyid)\
+[id] = {\
+ LE_DCN10_REG_LIST(id), \
+ UNIPHY_DCN2_REG_LIST(phyid), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
+}
+
+static const struct dcn10_link_enc_registers link_enc_regs[] = {
+ link_regs(0, A),
+ link_regs(1, B),
+ link_regs(2, C),
+ link_regs(3, D),
+ link_regs(4, E),
+};
+
+#define aux_regs(id)\
+[id] = {\
+ DCN2_AUX_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4)
+};
+
+static const struct dcn10_link_enc_shift le_shift = {
+ LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dcn10_link_enc_mask le_mask = {
+ LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK)
+};
+
+static struct link_encoder *dcn21_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dcn21_link_encoder *enc21 =
+ kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL);
+
+ if (!enc21)
+ return NULL;
+
+ dcn21_link_encoder_construct(enc21,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source],
+ &le_shift,
+ &le_mask);
+
+ return &enc21->enc10.base;
+}
+#define CTX ctx
+
+#define REG(reg_name) \
+ (DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+
+static uint32_t read_pipe_fuses(struct dc_context *ctx)
+{
+ uint32_t value = REG_READ(CC_DC_PIPE_DIS);
+ /* RV1 support max 4 pipes */
+ value = value & 0xf;
+ return value;
+}
+
static struct resource_funcs dcn21_res_pool_funcs = {
.destroy = dcn21_destroy_resource_pool,
- .link_enc_create = dcn20_link_encoder_create,
+ .link_enc_create = dcn21_link_encoder_create,
.validate_bandwidth = dcn21_validate_bandwidth,
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -1437,9 +1604,10 @@ static bool construct(
struct dc *dc,
struct dcn21_resource_pool *pool)
{
- int i;
+ int i, j;
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
+ uint32_t pipe_fuses = read_pipe_fuses(ctx);
ctx->dc_bios->regs = &bios_regs;
@@ -1457,7 +1625,9 @@ static bool construct(
*************************************************/
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
- pool->base.pipe_count = 4;
+ /* max pipe num for ASIC before check pipe fuses */
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 256;
@@ -1467,6 +1637,7 @@ static bool construct(
dc->caps.max_slave_planes = 1;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
+ dc->caps.extended_aux_timeout_support = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -1516,6 +1687,26 @@ static bool construct(
goto create_fail;
}
+ pool->base.dmcu = dcn20_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
#ifdef CONFIG_DRM_AMD_DC_DMUB
pool->base.dmcub = dcn21_dmcub_create(ctx,
&dmcub_regs,
@@ -1537,8 +1728,15 @@ static bool construct(
if (!pool->base.irqs)
goto create_fail;
+ j = 0;
/* mem input -> ipp -> dpp -> opp -> TG */
for (i = 0; i < pool->base.pipe_count; i++) {
+ /* if the pipe is disabled, skip the instance of the HW pipe,
+ * i.e., skip the ASIC register instance
+ */
+ if ((pipe_fuses & (1 << i)) != 0)
+ continue;
+
pool->base.hubps[i] = dcn21_hubp_create(ctx, i);
if (pool->base.hubps[i] == NULL) {
BREAK_TO_DEBUGGER();
@@ -1562,6 +1760,23 @@ static bool construct(
"DC: failed to create dpps!\n");
goto create_fail;
}
+
+ pool->base.opps[i] = dcn21_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto create_fail;
+ }
+
+ pool->base.timing_generators[i] = dcn21_timing_generator_create(
+ ctx, i);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto create_fail;
+ }
+ j++;
}
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
@@ -1582,27 +1797,9 @@ static bool construct(
pool->base.sw_i2cs[i] = NULL;
}
- for (i = 0; i < pool->base.res_cap->num_opp; i++) {
- pool->base.opps[i] = dcn21_opp_create(ctx, i);
- if (pool->base.opps[i] == NULL) {
- BREAK_TO_DEBUGGER();
- dm_error(
- "DC: failed to create output pixel processor!\n");
- goto create_fail;
- }
- }
-
- for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
- pool->base.timing_generators[i] = dcn21_timing_generator_create(
- ctx, i);
- if (pool->base.timing_generators[i] == NULL) {
- BREAK_TO_DEBUGGER();
- dm_error("DC: failed to create tg!\n");
- goto create_fail;
- }
- }
-
- pool->base.timing_generator_count = i;
+ pool->base.timing_generator_count = j;
+ pool->base.pipe_count = j;
+ pool->base.mpcc_count = j;
pool->base.mpc = dcn21_mpc_create(ctx);
if (pool->base.mpc == NULL) {
@@ -1645,7 +1842,7 @@ static bool construct(
&res_create_funcs : &res_create_maximus_funcs)))
goto create_fail;
- dcn20_hw_sequencer_construct(dc);
+ dcn21_hw_sequencer_construct(dc);
dc->caps.max_planes = pool->base.pipe_count;
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
new file mode 100644
index 000000000000..626d22d437f4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DM_CP_PSP_IF__H
+#define DM_CP_PSP_IF__H
+
+struct dc_link;
+
+struct cp_psp_stream_config {
+ uint8_t otg_inst;
+ uint8_t link_enc_inst;
+ uint8_t stream_enc_inst;
+ void *dm_stream_ctx;
+ bool dpms_off;
+};
+
+struct cp_psp_funcs {
+ void (*update_stream_config)(void *handle, struct cp_psp_stream_config *config);
+};
+
+struct cp_psp {
+ void *handle;
+ struct cp_psp_funcs funcs;
+};
+
+
+#endif /* DM_CP_PSP_IF__H */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index b6b4333737f2..94b75e942607 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -74,7 +74,7 @@ void dm_helpers_dp_mst_clear_payload_allocation_table(
/*
* Polls for ACT (allocation change trigger) handled and sends
* ALLOCATE_PAYLOAD message.
*/
-bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
struct dc_context *ctx,
const struct dc_stream_state *stream);
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index c03a441ee638..b01db61b6181 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -249,12 +249,10 @@ struct pp_smu_funcs_nv {
};
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
-
#define PP_SMU_NUM_SOCCLK_DPM_LEVELS 8
-#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 4
-#define PP_SMU_NUM_FCLK_DPM_LEVELS 4
-#define PP_SMU_NUM_MEMCLK_DPM_LEVELS 4
+#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 8
+#define PP_SMU_NUM_FCLK_DPM_LEVELS 8
+#define PP_SMU_NUM_MEMCLK_DPM_LEVELS 8
struct dpm_clock {
uint32_t Freq; // In MHz
@@ -288,7 +286,6 @@ struct pp_smu_funcs_rn {
enum pp_smu_status (*get_dpm_clock_table) (struct pp_smu *pp,
struct dpm_clocks *clock_table);
};
-#endif
struct pp_smu_funcs {
struct pp_smu ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 0fafd693ffb4..841ed6c23f93 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -38,6 +38,7 @@
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
+#define DCN20_MAX_DSC_IMAGE_WIDTH 5184
static double adjust_ReturnBW(
struct display_mode_lib *mode_lib,
@@ -3901,6 +3902,10 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.MaximumSwathWidthInLineBuffer);
}
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown(
+ mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states],
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+
for (j = 0; j < 2; j++) {
mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
mode_lib->vba.MaxDispclk[i],
@@ -3925,7 +3930,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
&& i == mode_lib->vba.soc.num_states)
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
- if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+ if (mode_lib->vba.ODMCapability == false ||
+ (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown
+ && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN20_MAX_DSC_IMAGE_WIDTH))) {
locals->ODMCombineEnablePerState[i][k] = false;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 878bf4782ce6..2c7455e22a65 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -207,7 +207,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
- // FIXME: take the max between luma, chroma chunk size?
+ // TODO: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
rq_regs->drq_expansion_mode = 0;
@@ -677,7 +677,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
unsigned int meta_pitch = 0;
unsigned int ppe = mode_422 ? 2 : 1;
- // FIXME check if ppe apply for both luma and chroma in 422 case
+ // TODO check if ppe apply for both luma and chroma in 422 case
if (is_chroma) {
vp_width = pipe_src_param.viewport_width_c / ppe;
vp_height = pipe_src_param.viewport_height_c;
@@ -959,7 +959,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
- mode_422 = 0; // FIXME
+ mode_422 = 0; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
@@ -1655,7 +1655,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1)
* (double) cur_req_width;
cur_req_per_width = cur_width_ub / (double) cur_req_width;
- hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor
+ hactive_cur = (double) cur_src_width / hscl_ratio; // TODO: oswin to think about what to do for cursor
if (vratio_pre_l <= 1.0) {
*refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index ed8bf5f723c9..1e6aeb1bd2bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -207,7 +207,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
- // FIXME: take the max between luma, chroma chunk size?
+ // TODO: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
rq_regs->drq_expansion_mode = 0;
@@ -677,7 +677,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
unsigned int meta_pitch = 0;
unsigned int ppe = mode_422 ? 2 : 1;
- // FIXME check if ppe apply for both luma and chroma in 422 case
+ // TODO check if ppe apply for both luma and chroma in 422 case
if (is_chroma) {
vp_width = pipe_src_param.viewport_width_c / ppe;
vp_height = pipe_src_param.viewport_height_c;
@@ -959,7 +959,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
- mode_422 = 0; // FIXME
+ mode_422 = 0; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
@@ -1655,7 +1655,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1)
* (double) cur_req_width;
cur_req_per_width = cur_width_ub / (double) cur_req_width;
- hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor
+ hactive_cur = (double) cur_src_width / hscl_ratio; // TODO: oswin to think about what to do for cursor
if (vratio_pre_l <= 1.0) {
*refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 3b6ed60dcd35..ba77957aefe3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -65,6 +65,7 @@ typedef struct {
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
+#define DCN21_MAX_DSC_IMAGE_WIDTH 5184
static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
@@ -3379,6 +3380,8 @@ static unsigned int TruncToValidBPP(
return 30;
else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
return 24;
+ else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
+ return 18;
else
return BPP_INVALID;
}
@@ -3936,6 +3939,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.MaximumSwathWidthInLineBuffer);
}
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown(
+ mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states],
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+
for (j = 0; j < 2; j++) {
mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
mode_lib->vba.MaxDispclk[i],
@@ -3965,7 +3972,9 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&& i == mode_lib->vba.soc.num_states)
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
- if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+ if (mode_lib->vba.ODMCapability == false ||
+ (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown
+ && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN21_MAX_DSC_IMAGE_WIDTH))) {
locals->ODMCombineEnablePerState[i][k] = false;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index f4c1ef9046bf..83f84cdd4055 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -269,7 +269,7 @@ struct writeback_st {
struct _vcs_dpi_display_output_params_st {
int dp_lanes;
- int output_bpp;
+ double output_bpp;
int dsc_enable;
int wb_enable;
int num_active_wb;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 65cf4edddaff..362dc6ea98ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -434,6 +434,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
dst->odm_combine;
mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] =
(enum output_format_class) (dout->output_format);
+ mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] =
+ dout->output_bpp;
mode_lib->vba.Output[mode_lib->vba.NumberOfActivePlanes] =
(enum output_encoder_class) (dout->output_type);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index ad8571f5a142..4c3e9cc30167 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -243,7 +243,7 @@ void dml1_extract_rq_regs(
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
- /* FIXME: take the max between luma, chroma chunk size?
+ /* TODO: take the max between luma, chroma chunk size?
* okay for now, as we are setting chunk_bytes to 8kb anyways
*/
if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */
@@ -602,7 +602,7 @@ static void get_surf_rq_param(
unsigned int log2_dpte_group_length;
unsigned int func_meta_row_height, func_dpte_row_height;
- /* FIXME check if ppe apply for both luma and chroma in 422 case */
+ /* TODO check if ppe apply for both luma and chroma in 422 case */
if (is_chroma) {
vp_width = pipe_src_param.viewport_width_c / ppe;
vp_height = pipe_src_param.viewport_height_c;
@@ -1141,7 +1141,7 @@ void dml1_rq_dlg_get_dlg_params(
ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; /* 15 bits */
- prefetch_xy_calc_in_dcfclk = 24.0; /* FIXME: ip_param */
+ prefetch_xy_calc_in_dcfclk = 24.0; /* TODO: ip_param */
min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
t_calc_us = prefetch_xy_calc_in_dcfclk / min_dcfclk_mhz;
min_ttu_vblank = dlg_sys_param.t_urg_wm_us;
@@ -1182,7 +1182,7 @@ void dml1_rq_dlg_get_dlg_params(
dcc_en = e2e_pipe_param.pipe.src.dcc;
dual_plane = is_dual_plane(
(enum source_format_class) e2e_pipe_param.pipe.src.source_format);
- mode_422 = 0; /* FIXME */
+ mode_422 = 0; /* TODO */
access_dir = (e2e_pipe_param.pipe.src.source_scan == dm_vert); /* vp access direction: horizontal or vertical accessed */
bytes_per_element_l = get_bytes_per_element(
(enum source_format_class) e2e_pipe_param.pipe.src.source_format,
@@ -1837,7 +1837,7 @@ void dml1_rq_dlg_get_dlg_params(
cur0_width_ub = dml_ceil((double) cur0_src_width / (double) cur0_req_width, 1)
* (double) cur0_req_width;
cur0_req_per_width = cur0_width_ub / (double) cur0_req_width;
- hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* FIXME: oswin to think about what to do for cursor */
+ hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* TODO: oswin to think about what to do for cursor */
if (vratio_pre_l <= 1.0) {
refcyc_per_req_delivery_pre_cur0 = hactive_cur0 * ref_freq_to_pix_freq
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 5995bcdfed54..e60f760585e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -23,8 +23,7 @@
*/
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-#include "dc.h"
-#include "core_types.h"
+#include "dc_hw_types.h"
#include "dsc.h"
#include <drm/drm_dp_helper.h>
@@ -47,6 +46,59 @@ const struct dc_dsc_policy dsc_policy = {
/* This module's internal functions */
+static uint32_t dc_dsc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t bits_per_channel = 0;
+ uint32_t kbps;
+
+ if (timing->flags.DSC) {
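+ /* dsc_cfg.bits_per_pixel is in 1/16-bpp units and pix_clk is in
+ * 100Hz units, so dividing by 160 (rounding up) yields kbps. */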
+ kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel);
+ kbps = kbps / 160 + ((kbps % 160) ? 1 : 0);
+ return kbps;
+ }
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ bits_per_channel = 6;
+ break;
+ case COLOR_DEPTH_888:
+ bits_per_channel = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bits_per_channel = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bits_per_channel = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bits_per_channel = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bits_per_channel = 16;
+ break;
+ default:
+ break;
+ }
+
+ ASSERT(bits_per_channel != 0);
+
+ kbps = timing->pix_clk_100hz / 10;
+ kbps *= bits_per_channel;
+
+ if (timing->flags.Y_ONLY != 1) {
+ /* Only Y-only reduces bandwidth to 1/3 of RGB */
+ kbps *= 3;
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ kbps /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ kbps = kbps * 2 / 3;
+ }
+
+ return kbps;
+
+}
+
static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
{
@@ -178,12 +230,11 @@ static bool dsc_bpp_increment_div_from_dpcd(int bpp_increment_dpcd, uint32_t *bp
}
static void get_dsc_enc_caps(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
struct dsc_enc_caps *dsc_enc_caps,
int pixel_clock_100Hz)
{
// This is a static HW query, so we can use any DSC
- struct display_stream_compressor *dsc = dc->res_pool->dscs[0];
memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
if (dsc)
@@ -290,7 +341,7 @@ static void get_dsc_bandwidth_range(
struct dc_dsc_bw_range *range)
{
/* native stream bandwidth */
- range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing);
+ range->stream_kbps = dc_dsc_bandwidth_in_kbps_from_timing(timing);
/* max dsc target bpp */
range->max_kbps = dsc_div_by_10_round_up(max_bpp * timing->pix_clk_100hz);
@@ -512,6 +563,7 @@ static bool setup_dsc_config(
const struct dsc_enc_caps *dsc_enc_caps,
int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
+ int min_slice_height_override,
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
@@ -680,7 +732,10 @@ static bool setup_dsc_config(
// Slice height (i.e. number of slices per column): start with policy and pick the first one that height is divisible by.
// For 4:2:0 make sure the slice height is divisible by 2 as well.
- slice_height = min(dsc_policy.min_sice_height, pic_height);
+ if (min_slice_height_override == 0)
+ slice_height = min(dsc_policy.min_sice_height, pic_height);
+ else
+ slice_height = min(min_slice_height_override, pic_height);
while (slice_height < pic_height && (pic_height % slice_height != 0 ||
(timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 && slice_height % 2 != 0)))
@@ -802,7 +857,8 @@ bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dp
* If DSC is not possible, leave '*range' untouched.
*/
bool dc_dsc_compute_bandwidth_range(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
+ const uint32_t dsc_min_slice_height_override,
const uint32_t min_bpp,
const uint32_t max_bpp,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
@@ -814,16 +870,14 @@ bool dc_dsc_compute_bandwidth_range(
struct dsc_enc_caps dsc_common_caps;
struct dc_dsc_config config;
- get_dsc_enc_caps(dc, &dsc_enc_caps, timing->pix_clk_100hz);
+ get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps,
timing->pixel_encoding, &dsc_common_caps);
if (is_dsc_possible)
- is_dsc_possible = setup_dsc_config(dsc_sink_caps,
- &dsc_enc_caps,
- 0,
- timing, &config);
+ is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
+ dsc_min_slice_height_override, &config);
if (is_dsc_possible)
get_dsc_bandwidth_range(min_bpp, max_bpp, &dsc_common_caps, timing, range);
@@ -832,8 +886,9 @@ bool dc_dsc_compute_bandwidth_range(
}
bool dc_dsc_compute_config(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
+ const uint32_t dsc_min_slice_height_override,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
struct dc_dsc_config *dsc_cfg)
@@ -841,11 +896,11 @@ bool dc_dsc_compute_config(
bool is_dsc_possible = false;
struct dsc_enc_caps dsc_enc_caps;
- get_dsc_enc_caps(dc, &dsc_enc_caps, timing->pix_clk_100hz);
+ get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
is_dsc_possible = setup_dsc_config(dsc_sink_caps,
&dsc_enc_caps,
target_bandwidth_kbps,
- timing, dsc_cfg);
+ timing, dsc_min_slice_height_override, dsc_cfg);
return is_dsc_possible;
}
#endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */
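The bandwidth arithmetic added in dc_dsc_bandwidth_in_kbps_from_timing() is easy to sanity-check in isolation. The standalone C sketch below uses illustrative 4k60 values rather than real driver state; it mirrors both paths: for DSC, bits_per_pixel is carried in 1/16-bpp units and the pixel clock in 100 Hz units, so kbps reduces to pix_clk_100hz * bpp / 160 rounded up, while uncompressed streams carry 3 * bpc bits per pixel, halved for 4:2:0.

#include <stdint.h>
#include <stdio.h>

/* DSC path: bpp_x16 is bits-per-pixel in 1/16 units, clock in 100 Hz units:
 * bits/s = (pix_clk_100hz * 100) * (bpp_x16 / 16), so kbps = pix * bpp / 160. */
static uint32_t dsc_stream_kbps(uint32_t pix_clk_100hz, uint32_t bpp_x16)
{
	uint32_t kbps = pix_clk_100hz * bpp_x16;

	return kbps / 160 + ((kbps % 160) ? 1 : 0); /* round up like the patch */
}

/* Uncompressed path: three channels of bpc bits, halved for YCbCr 4:2:0. */
static uint32_t raw_stream_kbps(uint32_t pix_clk_100hz, uint32_t bpc, int ycbcr420)
{
	uint32_t kbps = (pix_clk_100hz / 10) * bpc * 3;

	return ycbcr420 ? kbps / 2 : kbps;
}

int main(void)
{
	/* 4k60: ~594 MHz pixel clock, 8 bpc RGB vs. DSC at 12 bpp (192/16). */
	printf("raw: %u kbps\n", raw_stream_kbps(5940000, 8, 0)); /* 14256000 */
	printf("dsc: %u kbps\n", dsc_stream_kbps(5940000, 192)); /* 7128000 */
	return 0;
}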
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
index ca51e83f8764..76c4b12d6824 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
@@ -177,7 +177,6 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com
{
float bpp_group;
float initial_xmit_delay_factor;
- int source_bpp;
int padding_pixels;
int i;
@@ -217,8 +216,6 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com
rc->initial_xmit_delay++;
}
- source_bpp = MODE_SELECT(bpc * 3, bpc * 2, bpc * 1.5);
-
rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
rc->flatness_det_thresh = 2 << (bpc - 8);
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
index f8f85490e77e..f67c18375bfd 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
@@ -321,8 +321,6 @@ void dal_gpio_destroy(
return;
}
- dal_gpio_close(*gpio);
-
switch ((*gpio)->id) {
case GPIO_ID_DDC_DATA:
kfree((*gpio)->hw_container.ddc);
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index d03165e71dc6..92280cc05e2d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -169,7 +169,6 @@ void dal_gpio_destroy_generic_mux(
return;
}
- dal_gpio_close(*mux);
dal_gpio_destroy(mux);
kfree(*mux);
@@ -460,7 +459,6 @@ void dal_gpio_destroy_irq(
return;
}
- dal_gpio_close(*irq);
dal_gpio_destroy(irq);
kfree(*irq);
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/Makefile b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile
new file mode 100644
index 000000000000..4170b6eb9ec0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile
@@ -0,0 +1,28 @@
+# Copyright 2019 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Makefile for the 'hdcp' sub-component of DAL.
+#
+
+HDCP_MSG = hdcp_msg.o
+
+AMD_DAL_HDCP_MSG = $(addprefix $(AMDDALPATH)/dc/hdcp/,$(HDCP_MSG))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HDCP_MSG)
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
new file mode 100644
index 000000000000..cf6ef387e5d2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "include/hdcp_types.h"
+#include "include/i2caux_interface.h"
+#include "include/signal_types.h"
+#include "core_types.h"
+#include "dc_link_ddc.h"
+#include "link_hwss.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+#define HDCP14_KSV_SIZE 5
+#define HDCP14_MAX_KSV_FIFO_SIZE 127*HDCP14_KSV_SIZE
+
+static const bool hdcp_cmd_is_read[] = {
+ [HDCP_MESSAGE_ID_READ_BKSV] = true,
+ [HDCP_MESSAGE_ID_READ_RI_R0] = true,
+ [HDCP_MESSAGE_ID_READ_PJ] = true,
+ [HDCP_MESSAGE_ID_WRITE_AKSV] = false,
+ [HDCP_MESSAGE_ID_WRITE_AINFO] = false,
+ [HDCP_MESSAGE_ID_WRITE_AN] = false,
+ [HDCP_MESSAGE_ID_READ_VH_X] = true,
+ [HDCP_MESSAGE_ID_READ_VH_0] = true,
+ [HDCP_MESSAGE_ID_READ_VH_1] = true,
+ [HDCP_MESSAGE_ID_READ_VH_2] = true,
+ [HDCP_MESSAGE_ID_READ_VH_3] = true,
+ [HDCP_MESSAGE_ID_READ_VH_4] = true,
+ [HDCP_MESSAGE_ID_READ_BCAPS] = true,
+ [HDCP_MESSAGE_ID_READ_BSTATUS] = true,
+ [HDCP_MESSAGE_ID_READ_KSV_FIFO] = true,
+ [HDCP_MESSAGE_ID_READ_BINFO] = true,
+ [HDCP_MESSAGE_ID_HDCP2VERSION] = true,
+ [HDCP_MESSAGE_ID_RX_CAPS] = true,
+ [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = false,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = true,
+ [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = false,
+ [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = false,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = true,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = true,
+ [HDCP_MESSAGE_ID_WRITE_LC_INIT] = false,
+ [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = true,
+ [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = false,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = true,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = false,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = false,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = true,
+ [HDCP_MESSAGE_ID_READ_RXSTATUS] = true,
+ [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = false
+};
+
+static const uint8_t hdcp_i2c_offsets[] = {
+ [HDCP_MESSAGE_ID_READ_BKSV] = 0x0,
+ [HDCP_MESSAGE_ID_READ_RI_R0] = 0x8,
+ [HDCP_MESSAGE_ID_READ_PJ] = 0xA,
+ [HDCP_MESSAGE_ID_WRITE_AKSV] = 0x10,
+ [HDCP_MESSAGE_ID_WRITE_AINFO] = 0x15,
+ [HDCP_MESSAGE_ID_WRITE_AN] = 0x18,
+ [HDCP_MESSAGE_ID_READ_VH_X] = 0x20,
+ [HDCP_MESSAGE_ID_READ_VH_0] = 0x20,
+ [HDCP_MESSAGE_ID_READ_VH_1] = 0x24,
+ [HDCP_MESSAGE_ID_READ_VH_2] = 0x28,
+ [HDCP_MESSAGE_ID_READ_VH_3] = 0x2C,
+ [HDCP_MESSAGE_ID_READ_VH_4] = 0x30,
+ [HDCP_MESSAGE_ID_READ_BCAPS] = 0x40,
+ [HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41,
+ [HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43,
+ [HDCP_MESSAGE_ID_READ_BINFO] = 0xFF,
+ [HDCP_MESSAGE_ID_HDCP2VERSION] = 0x50,
+ [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x60,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x80,
+ [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x60,
+ [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x60,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x80,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x80,
+ [HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x60,
+ [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80,
+ [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
+ [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70
+};
+
+struct protection_properties {
+ bool supported;
+ bool (*process_transaction)(
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info);
+};
+
+static const struct protection_properties non_supported_protection = {
+ .supported = false
+};
+
+static bool hdmi_14_process_transaction(
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info)
+{
+ uint8_t *buff = NULL;
+ bool result;
+	const uint8_t hdcp_i2c_addr_link_primary = 0x3a; /* 0x74 >> 1 */
+	const uint8_t hdcp_i2c_addr_link_secondary = 0x3b; /* 0x76 >> 1 */
+ struct i2c_command i2c_command;
+ uint8_t offset = hdcp_i2c_offsets[message_info->msg_id];
+ struct i2c_payload i2c_payloads[] = {
+ { true, 0, 1, &offset },
+		/* actual hdcp payload, will be filled later, zeroed for now */
+ { 0 }
+ };
+
+ switch (message_info->link) {
+ case HDCP_LINK_SECONDARY:
+ i2c_payloads[0].address = hdcp_i2c_addr_link_secondary;
+ i2c_payloads[1].address = hdcp_i2c_addr_link_secondary;
+ break;
+ case HDCP_LINK_PRIMARY:
+ default:
+ i2c_payloads[0].address = hdcp_i2c_addr_link_primary;
+ i2c_payloads[1].address = hdcp_i2c_addr_link_primary;
+ break;
+ }
+
+ if (hdcp_cmd_is_read[message_info->msg_id]) {
+ i2c_payloads[1].write = false;
+ i2c_command.number_of_payloads = ARRAY_SIZE(i2c_payloads);
+ i2c_payloads[1].length = message_info->length;
+ i2c_payloads[1].data = message_info->data;
+ } else {
+ i2c_command.number_of_payloads = 1;
+ buff = kzalloc(message_info->length + 1, GFP_KERNEL);
+
+ if (!buff)
+ return false;
+
+ buff[0] = offset;
+ memmove(&buff[1], message_info->data, message_info->length);
+ i2c_payloads[0].length = message_info->length + 1;
+ i2c_payloads[0].data = buff;
+ }
+
+ i2c_command.payloads = i2c_payloads;
+	i2c_command.engine = I2C_COMMAND_ENGINE_HW; /* only the HW engine */
+ i2c_command.speed = link->ddc->ctx->dc->caps.i2c_speed_in_khz;
+
+ result = dm_helpers_submit_i2c(
+ link->ctx,
+ link,
+ &i2c_command);
+
+ if (buff)
+ kfree(buff);
+
+ return result;
+}
+
+static const struct protection_properties hdmi_14_protection = {
+ .supported = true,
+ .process_transaction = hdmi_14_process_transaction
+};
+
+static const uint32_t hdcp_dpcd_addrs[] = {
+ [HDCP_MESSAGE_ID_READ_BKSV] = 0x68000,
+ [HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005,
+ [HDCP_MESSAGE_ID_READ_PJ] = 0xFFFFFFFF,
+ [HDCP_MESSAGE_ID_WRITE_AKSV] = 0x68007,
+ [HDCP_MESSAGE_ID_WRITE_AINFO] = 0x6803B,
+ [HDCP_MESSAGE_ID_WRITE_AN] = 0x6800c,
+ [HDCP_MESSAGE_ID_READ_VH_X] = 0x68014,
+ [HDCP_MESSAGE_ID_READ_VH_0] = 0x68014,
+ [HDCP_MESSAGE_ID_READ_VH_1] = 0x68018,
+ [HDCP_MESSAGE_ID_READ_VH_2] = 0x6801c,
+ [HDCP_MESSAGE_ID_READ_VH_3] = 0x68020,
+ [HDCP_MESSAGE_ID_READ_VH_4] = 0x68024,
+ [HDCP_MESSAGE_ID_READ_BCAPS] = 0x68028,
+ [HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029,
+ [HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c,
+ [HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a,
+ [HDCP_MESSAGE_ID_RX_CAPS] = 0x6921d,
+ [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x69000,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x6900b,
+ [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x69220,
+ [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x692a0,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x692c0,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x692e0,
+ [HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x692f0,
+ [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8,
+ [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473,
+ [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x69493,
+ [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x69494
+};
+
+static bool dpcd_access_helper(
+ struct dc_link *link,
+ uint32_t length,
+ uint8_t *data,
+ uint32_t dpcd_addr,
+ bool is_read)
+{
+ enum dc_status status;
+ uint32_t cur_length = 0;
+ uint32_t offset = 0;
+ uint32_t ksv_read_size = 0x6803b - 0x6802c;
+
+	/* KSV FIFO reads must be handled in repeated chunks */
+ if (dpcd_addr == 0x6802c) {
+ if (length % HDCP14_KSV_SIZE) {
+ DC_LOG_ERROR("%s: KsvFifo Size(%d) is not a multiple of HDCP14_KSV_SIZE(%d)\n",
+ __func__,
+ length,
+ HDCP14_KSV_SIZE);
+ }
+ if (length > HDCP14_MAX_KSV_FIFO_SIZE) {
+ DC_LOG_ERROR("%s: KsvFifo Size(%d) is greater than HDCP14_MAX_KSV_FIFO_SIZE(%d)\n",
+ __func__,
+ length,
+ HDCP14_MAX_KSV_FIFO_SIZE);
+ }
+
+ DC_LOG_ERROR("%s: Reading %d Ksv(s) from KsvFifo\n",
+ __func__,
+ length / HDCP14_KSV_SIZE);
+
+ while (length > 0) {
+ if (length > ksv_read_size) {
+ status = core_link_read_dpcd(
+ link,
+ dpcd_addr + offset,
+ data + offset,
+ ksv_read_size);
+
+ data += ksv_read_size;
+ length -= ksv_read_size;
+ } else {
+ status = core_link_read_dpcd(
+ link,
+ dpcd_addr + offset,
+ data + offset,
+ length);
+
+ data += length;
+ length = 0;
+ }
+
+ if (status != DC_OK)
+ return false;
+ }
+ } else {
+ while (length > 0) {
+ if (length > DEFAULT_AUX_MAX_DATA_SIZE)
+ cur_length = DEFAULT_AUX_MAX_DATA_SIZE;
+ else
+ cur_length = length;
+
+ if (is_read) {
+ status = core_link_read_dpcd(
+ link,
+ dpcd_addr + offset,
+ data + offset,
+ cur_length);
+ } else {
+ status = core_link_write_dpcd(
+ link,
+ dpcd_addr + offset,
+ data + offset,
+ cur_length);
+ }
+
+ if (status != DC_OK)
+ return false;
+
+ length -= cur_length;
+ offset += cur_length;
+ }
+ }
+ return true;
+}
+
+static bool dp_11_process_transaction(
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info)
+{
+ return dpcd_access_helper(
+ link,
+ message_info->length,
+ message_info->data,
+ hdcp_dpcd_addrs[message_info->msg_id],
+ hdcp_cmd_is_read[message_info->msg_id]);
+}
+
+static const struct protection_properties dp_11_protection = {
+ .supported = true,
+ .process_transaction = dp_11_process_transaction
+};
+
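dpcd_access_helper() above splits every transfer into DEFAULT_AUX_MAX_DATA_SIZE chunks, and reads the KSV FIFO in pieces while keeping the DPCD address fixed at 0x6802c (only the destination pointer advances, presumably because the FIFO register auto-increments). A minimal standalone sketch of the generic chunking loop, with a hypothetical xfer() callback standing in for core_link_read_dpcd()/core_link_write_dpcd():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AUX_MAX_DATA 16 /* stand-in for DEFAULT_AUX_MAX_DATA_SIZE */

/* Hypothetical transport backed by a fake register file. */
static uint8_t regs[1024];

static bool xfer(uint32_t addr, uint8_t *data, uint32_t len, bool is_read)
{
	if (addr + len > sizeof(regs))
		return false;
	if (is_read)
		memcpy(data, &regs[addr], len);
	else
		memcpy(&regs[addr], data, len);
	return true;
}

/* Same shape as the generic branch of dpcd_access_helper(). */
static bool access_chunked(uint32_t addr, uint8_t *data, uint32_t length,
		bool is_read)
{
	uint32_t offset = 0;

	while (length > 0) {
		uint32_t cur = length > AUX_MAX_DATA ? AUX_MAX_DATA : length;

		if (!xfer(addr + offset, data + offset, cur, is_read))
			return false;

		length -= cur;
		offset += cur;
	}
	return true;
}

int main(void)
{
	uint8_t buf[100];

	memset(buf, 0xab, sizeof(buf));
	printf("write ok: %d\n", access_chunked(0x40, buf, sizeof(buf), false));
	printf("read ok:  %d\n", access_chunked(0x40, buf, sizeof(buf), true));
	return 0;
}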
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index f189307750ab..eee78a73d88c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -52,7 +52,9 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
#include "clock_source.h"
#include "audio.h"
#include "dm_pp_smu.h"
-
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#include "dm_cp_psp.h"
+#endif
/************ link *****************/
struct link_init_data {
@@ -231,6 +233,7 @@ struct resource_pool {
struct dcn_fe_bandwidth {
int dppclk_khz;
+
};
struct stream_resource {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index b1fab251c09b..14716ba35662 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -95,6 +95,9 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size);
+bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
+ struct aux_payload *payload);
+
int dc_link_aux_transfer_raw(struct ddc_service *ddc,
struct aux_payload *payload,
enum aux_channel_operation_result *operation_result);
@@ -102,6 +105,9 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc,
bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload);
+enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc,
+ uint32_t timeout);
+
void dal_ddc_service_write_scdc_data(
struct ddc_service *ddc_service,
uint32_t pix_clk,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 08a4df2c61a8..045138dbdccb 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -28,6 +28,8 @@
#define LINK_TRAINING_ATTEMPTS 4
#define LINK_TRAINING_RETRY_DELAY 50 /* ms */
+#define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 32000 /*us*/
+#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 400 /*us*/
struct dc_link;
struct dc_stream_state;
@@ -43,6 +45,9 @@ bool dp_verify_link_cap_with_retries(
struct dc_link_settings *known_limit_link_setting,
int attempts);
+bool dp_verify_mst_link_cap(
+ struct dc_link *link);
+
bool dp_validate_mode_timing(
struct dc_link *link,
const struct dc_crtc_timing *timing);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
index e79cd4e92919..e77b3a76766d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
@@ -140,6 +140,9 @@ struct write_command_context {
struct aux_engine_funcs {
+ bool (*configure_timeout)(
+ struct ddc_service *ddc,
+ uint32_t timeout);
void (*destroy)(
struct aux_engine **ptr);
bool (*acquire_engine)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 76f9ad1b23df..f2e21cb9fbd5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -47,7 +47,7 @@
#ifdef CONFIG_DRM_AMD_DC_DCN2_1
/* Will these bw structures be ASIC specific? */
-#define MAX_NUM_DPM_LVL 4
+#define MAX_NUM_DPM_LVL 8
#define WM_SET_COUNT 4
@@ -149,6 +149,7 @@ struct wm_table {
struct clk_bw_params {
unsigned int vram_type;
unsigned int num_channels;
+ unsigned int dispclk_vco_khz;
struct clk_limit_table clk_table;
struct wm_table wm_table;
};
@@ -180,12 +181,16 @@ struct clk_mgr_funcs {
struct dc_state *context,
enum dc_clock_type clock_type,
struct dc_clock_config *clock_cfg);
+
+ bool (*are_clock_states_equal) (struct dc_clocks *a,
+ struct dc_clocks *b);
};
struct clk_mgr {
struct dc_context *ctx;
struct clk_mgr_funcs *funcs;
struct dc_clocks clks;
+ bool psr_allow_active_cache;
int dprefclk_khz; // Used when programming the pixel clock in clock source funcs; need to figure out where this goes
#ifdef CONFIG_DRM_AMD_DC_DCN2_1
struct clk_bw_params *bw_params;
@@ -199,4 +204,8 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr);
+void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr);
+
+void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr);
+
#endif /* __DAL_CLK_MGR_H__ */
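The new are_clock_states_equal hook lets a clock manager skip reprogramming when the requested clocks already match the current ones. A generic standalone sketch of that pattern, using a hypothetical clocks struct rather than the real struct dc_clocks (which carries more fields):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in; the real struct dc_clocks has more fields. */
struct clocks {
	int dispclk_khz;
	int dppclk_khz;
	bool p_state_change_support;
};

static bool clocks_equal(const struct clocks *a, const struct clocks *b)
{
	return a->dispclk_khz == b->dispclk_khz &&
	       a->dppclk_khz == b->dppclk_khz &&
	       a->p_state_change_support == b->p_state_change_support;
}

static void update_clocks(struct clocks *cur, const struct clocks *req)
{
	if (clocks_equal(cur, req))
		return;	/* nothing changed: skip the expensive reprogramming */

	*cur = *req;
	printf("programmed dispclk=%d kHz dppclk=%d kHz\n",
	       cur->dispclk_khz, cur->dppclk_khz);
}

int main(void)
{
	struct clocks cur = { 600000, 600000, true };
	struct clocks req = cur;

	update_clocks(&cur, &req);	/* no-op */
	req.dispclk_khz = 1200000;
	update_clocks(&cur, &req);	/* programs the new state */
	return 0;
}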
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 7dd46eb96d67..2e8cd7956a17 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -184,6 +184,21 @@ struct clk_mgr_registers {
uint32_t MP1_SMN_C2PMSG_91;
};
+enum clock_type {
+ clock_type_dispclk = 1,
+ clock_type_dcfclk,
+ clock_type_socclk,
+ clock_type_pixelclk,
+ clock_type_phyclk,
+ clock_type_dppclk,
+ clock_type_fclk,
+ clock_type_dcfdsclk,
+ clock_type_dscclk,
+ clock_type_uclk,
+ clock_type_dramclk,
+};
+
+
struct state_dependent_clocks {
int display_clk_khz;
int pixel_clk_khz;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index d8e744f366e5..05ee5295d2c1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -38,8 +38,7 @@ struct dccg {
struct dccg_funcs {
void (*update_dpp_dto)(struct dccg *dccg,
int dpp_inst,
- int req_dppclk,
- bool reduce_divider_only);
+ int req_dppclk);
void (*get_dccg_ref_freq)(struct dccg *dccg,
unsigned int xtalin_freq_inKhz,
unsigned int *dccg_ref_freq_inKhz);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index a6297219d7fc..c81a17aeaa25 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -147,6 +147,7 @@ struct hubbub_funcs {
bool (*is_allow_self_refresh_enabled)(struct hubbub *hubbub);
void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow);
+ void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub);
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
index 1ddb1c6fa149..c6ff3d78b435 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
@@ -28,7 +28,11 @@
#include "dc_dsc.h"
#include "dc_hw_types.h"
-#include "dc_dp_types.h"
+#include "dc_types.h"
+/* do not include any other headers
+ * or else it might break Edid Utility functionality.
+ */
+
/* Input parameters for configuring DSC from the outside of DSC */
struct dsc_config {
@@ -81,12 +85,6 @@ struct dsc_enc_caps {
uint32_t bpp_increment_div; /* bpp increment divisor, e.g. if 16, it's 1/16th of a bit */
};
-struct display_stream_compressor {
- const struct dsc_funcs *funcs;
- struct dc_context *ctx;
- int inst;
-};
-
struct dsc_funcs {
void (*dsc_get_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
void (*dsc_read_state)(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index abb4e4237fb6..b21909216fb6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -184,6 +184,10 @@ struct link_encoder_funcs {
bool (*fec_is_active)(struct link_encoder *enc);
#endif
bool (*is_in_alt_mode) (struct link_encoder *enc);
+
+ void (*get_max_link_cap)(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
+
enum signal_type (*get_dig_mode)(
struct link_encoder *enc);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index e8668388581b..67b610d6d91f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -43,6 +43,7 @@ struct dcn_watermarks {
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
uint32_t frac_urg_bw_nom;
uint32_t frac_urg_bw_flip;
+ int32_t urgent_latency_ns;
#endif
struct cstate_pstate_watermarks_st cstate_pstate;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 957e9047381a..18def2b6fafe 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -208,6 +208,7 @@ struct output_pixel_processor {
struct mpc_tree mpc_tree_params;
bool mpcc_disconnect_pending[MAX_PIPES];
const struct opp_funcs *funcs;
+ uint32_t dyn_expansion;
};
enum fmt_stereo_action {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index fe9b7a10a1c3..6305e388612a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -214,6 +214,11 @@ struct stream_encoder_funcs {
unsigned int (*dig_source_otg)(
struct stream_encoder *enc);
+ bool (*dp_get_pixel_format)(
+ struct stream_encoder *enc,
+ enum dc_pixel_encoding *encoding,
+ enum dc_color_depth *depth);
+
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
void (*enc_read_state)(struct stream_encoder *enc, struct enc_state *s);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 6196cc32356e..27c73caf74ee 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -261,6 +261,8 @@ struct timing_generator_funcs {
void (*program_manual_trigger)(struct timing_generator *optc);
void (*setup_manual_trigger)(struct timing_generator *optc);
+ bool (*get_hw_timing)(struct timing_generator *optc,
+ struct dc_crtc_timing *hw_crtc_timing);
void (*set_vtg_params)(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 3a938cd414ea..d39c1e11def5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -114,6 +114,9 @@ struct hw_sequencer_funcs {
int opp_id);
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ void (*program_front_end_for_ctx)(
+ struct dc *dc,
+ struct dc_state *context);
void (*program_triplebuffer)(
const struct dc *dc,
struct pipe_ctx *pipe_ctx,
@@ -229,6 +232,13 @@ struct hw_sequencer_funcs {
struct dc *dc,
struct dc_state *context);
+ void (*exit_optimized_pwr_state)(
+ const struct dc *dc,
+ struct dc_state *context);
+ void (*optimize_pwr_state)(
+ const struct dc *dc,
+ struct dc_state *context);
+
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool (*update_bandwidth)(
struct dc *dc,
@@ -321,10 +331,12 @@ struct hw_sequencer_funcs {
struct dc_state *context);
void (*update_writeback)(struct dc *dc,
const struct dc_stream_status *stream_status,
- struct dc_writeback_info *wb_info);
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context);
void (*enable_writeback)(struct dc *dc,
const struct dc_stream_status *stream_status,
- struct dc_writeback_info *wb_info);
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context);
void (*disable_writeback)(struct dc *dc,
unsigned int dwb_pipe_inst);
#endif
@@ -337,6 +349,9 @@ struct hw_sequencer_funcs {
enum dc_clock_type clock_type,
struct dc_clock_config *clock_cfg);
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ bool (*s0i3_golden_init_wa)(struct dc *dc);
+#endif
};
void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index 18961707db23..9ad49da50a17 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -31,6 +31,8 @@
#define DP_BRANCH_DEVICE_ID_0022B9 0x0022B9
#define DP_BRANCH_DEVICE_ID_00001A 0x00001A
#define DP_BRANCH_DEVICE_ID_0080E1 0x0080e1
+#define DP_BRANCH_DEVICE_ID_90CC24 0x90CC24
+#define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C
enum ddc_result {
DDC_RESULT_UNKNOWN = 0,
diff --git a/drivers/gpu/drm/amd/display/include/hdcp_types.h b/drivers/gpu/drm/amd/display/include/hdcp_types.h
new file mode 100644
index 000000000000..f31e6befc8d6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/hdcp_types.h
@@ -0,0 +1,96 @@
+/*
+* Copyright 2019 Advanced Micro Devices, Inc.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+* OTHER DEALINGS IN THE SOFTWARE.
+*
+* Authors: AMD
+*
+*/
+
+#ifndef __DC_HDCP_TYPES_H__
+#define __DC_HDCP_TYPES_H__
+
+enum hdcp_message_id {
+ HDCP_MESSAGE_ID_INVALID = -1,
+
+ /* HDCP 1.4 */
+
+ HDCP_MESSAGE_ID_READ_BKSV = 0,
+ /* HDMI is called Ri', DP is called R0' */
+ HDCP_MESSAGE_ID_READ_RI_R0,
+ HDCP_MESSAGE_ID_READ_PJ,
+ HDCP_MESSAGE_ID_WRITE_AKSV,
+ HDCP_MESSAGE_ID_WRITE_AINFO,
+ HDCP_MESSAGE_ID_WRITE_AN,
+ HDCP_MESSAGE_ID_READ_VH_X,
+ HDCP_MESSAGE_ID_READ_VH_0,
+ HDCP_MESSAGE_ID_READ_VH_1,
+ HDCP_MESSAGE_ID_READ_VH_2,
+ HDCP_MESSAGE_ID_READ_VH_3,
+ HDCP_MESSAGE_ID_READ_VH_4,
+ HDCP_MESSAGE_ID_READ_BCAPS,
+ HDCP_MESSAGE_ID_READ_BSTATUS,
+ HDCP_MESSAGE_ID_READ_KSV_FIFO,
+ HDCP_MESSAGE_ID_READ_BINFO,
+
+ /* HDCP 2.2 */
+
+ HDCP_MESSAGE_ID_HDCP2VERSION,
+ HDCP_MESSAGE_ID_RX_CAPS,
+ HDCP_MESSAGE_ID_WRITE_AKE_INIT,
+ HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
+ HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM,
+ HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM,
+ HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
+ HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
+ HDCP_MESSAGE_ID_WRITE_LC_INIT,
+ HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
+ HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
+ HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+ HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
+ HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
+ HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
+ HDCP_MESSAGE_ID_READ_RXSTATUS,
+ HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE,
+
+ HDCP_MESSAGE_ID_MAX
+};
+
+enum hdcp_version {
+ HDCP_Unknown = 0,
+ HDCP_VERSION_14,
+ HDCP_VERSION_22,
+};
+
+enum hdcp_link {
+ HDCP_LINK_PRIMARY,
+ HDCP_LINK_SECONDARY
+};
+
+struct hdcp_protection_message {
+ enum hdcp_version version;
+ /* relevant only for DVI */
+ enum hdcp_link link;
+ enum hdcp_message_id msg_id;
+ uint32_t length;
+ uint8_t max_retries;
+ uint8_t *data;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index ec70c9b12e1a..16e69bbc69aa 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -37,8 +37,8 @@
#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
/* Number of elements in the render times cache array */
#define RENDER_TIMES_MAX_COUNT 10
-/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
-#define BTR_EXIT_MARGIN 2000
+/* Threshold to enter/exit BTR (to avoid frequent enter-exits at the lower limit) */
+#define BTR_MAX_MARGIN 2500
/* Threshold to change BTR multiplier (to avoid frequent changes) */
#define BTR_DRIFT_MARGIN 2000
/* Threshold to exit fixed refresh rate */
@@ -234,6 +234,10 @@ static void update_v_total_for_static_ramp(
current_duration_in_us) * (stream->timing.pix_clk_100hz / 10)),
stream->timing.h_total), 1000);
+ /* v_total cannot be less than nominal */
+ if (v_total < stream->timing.v_total)
+ v_total = stream->timing.v_total;
+
in_out_vrr->adjust.v_total_min = v_total;
in_out_vrr->adjust.v_total_max = v_total;
}
@@ -250,24 +254,22 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF;
unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF;
unsigned int frames_to_insert = 0;
- unsigned int min_frame_duration_in_ns = 0;
- unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
unsigned int delta_from_mid_point_delta_in_us;
-
- min_frame_duration_in_ns = ((unsigned int) (div64_u64(
- (1000000000ULL * 1000000),
- in_out_vrr->max_refresh_in_uhz)));
+ unsigned int max_render_time_in_us =
+ in_out_vrr->max_duration_in_us - in_out_vrr->btr.margin_in_us;
/* Program BTR */
- if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) {
+ if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < max_render_time_in_us) {
/* Exit Below the Range */
if (in_out_vrr->btr.btr_active) {
in_out_vrr->btr.frame_counter = 0;
in_out_vrr->btr.btr_active = false;
}
- } else if (last_render_time_in_us > max_render_time_in_us) {
+ } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) {
/* Enter Below the Range */
- in_out_vrr->btr.btr_active = true;
+ if (!in_out_vrr->btr.btr_active) {
+ in_out_vrr->btr.btr_active = true;
+ }
}
/* BTR set to "not active" so disengage */
@@ -323,7 +325,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Choose number of frames to insert based on how close it
* can get to the mid point of the variable range.
*/
- if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
+ if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us &&
+ (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 ||
+ mid_point_frames_floor < 2)) {
frames_to_insert = mid_point_frames_ceil;
delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
delta_from_mid_point_in_us_1;
@@ -339,7 +343,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
if (in_out_vrr->btr.frames_to_insert != 0 &&
delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
- in_out_vrr->max_duration_in_us) &&
+ max_render_time_in_us) &&
((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) >
in_out_vrr->min_duration_in_us))
frames_to_insert = in_out_vrr->btr.frames_to_insert;
@@ -743,6 +747,10 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
nominal_field_rate_in_uhz =
mod_freesync_calc_nominal_field_rate(stream);
+ /* Rounded to the nearest Hz */
+ nominal_field_rate_in_uhz = 1000000ULL *
+ div_u64(nominal_field_rate_in_uhz + 500000, 1000000);
+
min_refresh_in_uhz = in_config->min_refresh_in_uhz;
max_refresh_in_uhz = in_config->max_refresh_in_uhz;
@@ -788,6 +796,11 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
refresh_range = in_out_vrr->max_refresh_in_uhz -
in_out_vrr->min_refresh_in_uhz;
+ in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us -
+ 2 * in_out_vrr->min_duration_in_us;
+ if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN)
+ in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN;
+
in_out_vrr->supported = true;
}
@@ -803,6 +816,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->btr.inserted_duration_in_us = 0;
in_out_vrr->btr.frames_to_insert = 0;
in_out_vrr->btr.frame_counter = 0;
+
in_out_vrr->btr.mid_point_in_us =
(in_out_vrr->min_duration_in_us +
in_out_vrr->max_duration_in_us) / 2;
@@ -975,13 +989,9 @@ void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
unsigned int *inserted_frames,
unsigned int *inserted_duration_in_us)
{
- struct core_freesync *core_freesync = NULL;
-
if (mod_freesync == NULL)
return;
- core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
-
if (vrr->supported) {
*v_total_min = vrr->adjust.v_total_min;
*v_total_max = vrr->adjust.v_total_max;
@@ -996,14 +1006,13 @@ unsigned long long mod_freesync_calc_nominal_field_rate(
const struct dc_stream_state *stream)
{
unsigned long long nominal_field_rate_in_uhz = 0;
+ unsigned int total = stream->timing.h_total * stream->timing.v_total;
- /* Calculate nominal field rate for stream */
+ /* Calculate nominal field rate for stream, rounded up to nearest integer */
nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz / 10;
nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL;
- nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
- stream->timing.h_total);
- nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
- stream->timing.v_total);
+
+ nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, total);
return nominal_field_rate_in_uhz;
}
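The freesync change above replaces the fixed BTR_EXIT_MARGIN with a per-config margin (max_duration_in_us - 2 * min_duration_in_us, capped at BTR_MAX_MARGIN) and spends half of it on each side of the reduced max_render_time as hysteresis. A worked example in standalone C, assuming a hypothetical 40-144 Hz panel:

#include <stdio.h>

#define BTR_MAX_MARGIN 2500

int main(void)
{
	/* Frame durations in microseconds for a hypothetical 40-144 Hz range. */
	unsigned int max_duration_us = 1000000 / 40;	/* 25000 */
	unsigned int min_duration_us = 1000000 / 144;	/* ~6944 */

	unsigned int margin = max_duration_us - 2 * min_duration_us;

	if (margin > BTR_MAX_MARGIN)
		margin = BTR_MAX_MARGIN;	/* 11112 -> 2500 */

	/* max_render_time shrinks by the margin, as in apply_below_the_range() */
	unsigned int max_render = max_duration_us - margin;	/* 22500 */

	printf("enter BTR above %u us, exit below %u us\n",
	       max_render + margin / 2,		/* 23750 */
	       max_render - margin / 2);	/* 21250 */
	return 0;
}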
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/Makefile b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile
new file mode 100644
index 000000000000..1c3c6d47973a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile
@@ -0,0 +1,32 @@
+#
+# Copyright 2019 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'hdcp' sub-module of DAL.
+#
+
+HDCP = hdcp_ddc.o hdcp_log.o hdcp_psp.o hdcp.o \
+ hdcp1_execution.o hdcp1_transition.o
+
+AMD_DAL_HDCP = $(addprefix $(AMDDALPATH)/modules/hdcp/,$(HDCP))
+#$(info ************ DAL-HDCP_MAKEFILE ************)
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HDCP)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
new file mode 100644
index 000000000000..d7ac445dec6f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -0,0 +1,426 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
+static void push_error_status(struct mod_hdcp *hdcp,
+ enum mod_hdcp_status status)
+{
+ struct mod_hdcp_trace *trace = &hdcp->connection.trace;
+
+ if (trace->error_count < MAX_NUM_OF_ERROR_TRACE) {
+ trace->errors[trace->error_count].status = status;
+ trace->errors[trace->error_count].state_id = hdcp->state.id;
+ trace->error_count++;
+ HDCP_ERROR_TRACE(hdcp, status);
+ }
+
+ hdcp->connection.hdcp1_retry_count++;
+}
+
+static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
+{
+ int i, display_enabled = 0;
+
+ /* if all displays on the link are disabled, hdcp is not desired */
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
+ !hdcp->connection.displays[i].adjust.disable) {
+ display_enabled = 1;
+ break;
+ }
+ }
+
+ return (hdcp->connection.hdcp1_retry_count < MAX_NUM_OF_ATTEMPTS) &&
+ display_enabled && !hdcp->connection.link.adjust.hdcp1.disable;
+}
+
+static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ union mod_hdcp_transition_input *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (is_in_initialized_state(hdcp)) {
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ /* initialize transition input */
+ memset(input, 0, sizeof(union mod_hdcp_transition_input));
+ } else if (is_in_cp_not_desired_state(hdcp)) {
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ /* update topology event if hdcp is not desired */
+ status = mod_hdcp_add_display_topology(hdcp);
+ } else if (is_in_hdcp1_states(hdcp)) {
+ status = mod_hdcp_hdcp1_execution(hdcp, event_ctx, &input->hdcp1);
+ } else if (is_in_hdcp1_dp_states(hdcp)) {
+ status = mod_hdcp_hdcp1_dp_execution(hdcp,
+ event_ctx, &input->hdcp1);
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ union mod_hdcp_transition_input *input,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->unexpected_event)
+ goto out;
+
+ if (is_in_initialized_state(hdcp)) {
+ if (is_dp_hdcp(hdcp))
+ if (is_cp_desired_hdcp1(hdcp)) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D1_A0_DETERMINE_RX_HDCP_CAPABLE);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+ }
+ else if (is_hdmi_dvi_sl_hdcp(hdcp))
+ if (is_cp_desired_hdcp1(hdcp)) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A0_WAIT_FOR_ACTIVE_RX);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+ }
+ else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+ }
+ } else if (is_in_cp_not_desired_state(hdcp)) {
+ increment_stay_counter(hdcp);
+ } else if (is_in_hdcp1_states(hdcp)) {
+ status = mod_hdcp_hdcp1_transition(hdcp,
+ event_ctx, &input->hdcp1, output);
+ } else if (is_in_hdcp1_dp_states(hdcp)) {
+ status = mod_hdcp_hdcp1_dp_transition(hdcp,
+ event_ctx, &input->hdcp1, output);
+ } else {
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (is_hdcp1(hdcp)) {
+ if (hdcp->auth.trans_input.hdcp1.create_session != UNKNOWN)
+ mod_hdcp_hdcp1_destroy_session(hdcp);
+
+ if (hdcp->auth.trans_input.hdcp1.add_topology == PASS) {
+ status = mod_hdcp_remove_display_topology(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS) {
+ output->callback_needed = 0;
+ output->watchdog_timer_needed = 0;
+ goto out;
+ }
+ }
+ HDCP_TOP_RESET_AUTH_TRACE(hdcp);
+ memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
+ memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
+ } else if (is_in_cp_not_desired_state(hdcp)) {
+ status = mod_hdcp_remove_display_topology(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS) {
+ output->callback_needed = 0;
+ output->watchdog_timer_needed = 0;
+ goto out;
+ }
+ HDCP_TOP_RESET_AUTH_TRACE(hdcp);
+ memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
+ memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
+ }
+
+out:
+	/* stop callback and watchdog requests from previous authentication */
+ output->watchdog_timer_stop = 1;
+ output->callback_stop = 1;
+ return status;
+}
+
+static enum mod_hdcp_status reset_connection(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ memset(output, 0, sizeof(struct mod_hdcp_output));
+
+ status = reset_authentication(hdcp, output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ if (current_state(hdcp) != HDCP_UNINITIALIZED) {
+ HDCP_TOP_RESET_CONN_TRACE(hdcp);
+ set_state_id(hdcp, output, HDCP_UNINITIALIZED);
+ }
+ memset(&hdcp->connection, 0, sizeof(hdcp->connection));
+out:
+ return status;
+}
+
+/*
+ * Implementation of functions in mod_hdcp.h
+ */
+size_t mod_hdcp_get_memory_size(void)
+{
+ return sizeof(struct mod_hdcp);
+}
+
+enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp,
+ struct mod_hdcp_config *config)
+{
+ struct mod_hdcp_output output;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ memset(hdcp, 0, sizeof(struct mod_hdcp));
+ memset(&output, 0, sizeof(output));
+ hdcp->config = *config;
+ HDCP_TOP_INTERFACE_TRACE(hdcp);
+ status = reset_connection(hdcp, &output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_output output;
+
+ HDCP_TOP_INTERFACE_TRACE(hdcp);
+ memset(&output, 0, sizeof(output));
+ status = reset_connection(hdcp, &output);
+ if (status == MOD_HDCP_STATUS_SUCCESS)
+ memset(hdcp, 0, sizeof(struct mod_hdcp));
+ else
+ push_error_status(hdcp, status);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
+ struct mod_hdcp_link *link, struct mod_hdcp_display *display,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_display *display_container = NULL;
+
+ HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, display->index);
+ memset(output, 0, sizeof(struct mod_hdcp_output));
+
+ /* skip inactive display */
+ if (display->state != MOD_HDCP_DISPLAY_ACTIVE) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ goto out;
+ }
+
+ /* check existing display container */
+ if (get_active_display_at_index(hdcp, display->index)) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ goto out;
+ }
+
+ /* find an empty display container */
+ display_container = get_empty_display_container(hdcp);
+ if (!display_container) {
+ status = MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND;
+ goto out;
+ }
+
+ /* reset existing authentication status */
+ status = reset_authentication(hdcp, output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ /* add display to connection */
+ hdcp->connection.link = *link;
+ *display_container = *display;
+
+ /* reset retry counters */
+ reset_retry_counts(hdcp);
+
+ /* reset error trace */
+ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
+
+ /* request authentication */
+ if (current_state(hdcp) != HDCP_INITIALIZED)
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
+ callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000, output);
+out:
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
+ uint8_t index, struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_display *display = NULL;
+
+ HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, index);
+ memset(output, 0, sizeof(struct mod_hdcp_output));
+
+ /* find display in connection */
+ display = get_active_display_at_index(hdcp, index);
+ if (!display) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ goto out;
+ }
+
+ /* stop current authentication */
+ status = reset_authentication(hdcp, output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ /* remove display */
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
+
+ /* clear retry counters */
+ reset_retry_counts(hdcp);
+
+ /* reset error trace */
+ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
+
+	/* request authentication for remaining displays */
+ if (get_active_display_count(hdcp) > 0)
+ callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000,
+ output);
+out:
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp,
+ uint8_t index, struct mod_hdcp_display_query *query)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_display *display = NULL;
+
+ /* find display in connection */
+ display = get_active_display_at_index(hdcp, index);
+ if (!display) {
+ status = MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+ goto out;
+ }
+
+ /* populate query */
+ query->link = &hdcp->connection.link;
+ query->display = display;
+ query->trace = &hdcp->connection.trace;
+ query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ mod_hdcp_hdcp1_get_link_encryption_status(hdcp, &query->encryption_status);
+
+out:
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_reset_connection(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ HDCP_TOP_INTERFACE_TRACE(hdcp);
+ status = reset_connection(hdcp, output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp,
+ enum mod_hdcp_event event, struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status exec_status, trans_status, reset_status, status;
+ struct mod_hdcp_event_context event_ctx;
+
+ HDCP_EVENT_TRACE(hdcp, event);
+ memset(output, 0, sizeof(struct mod_hdcp_output));
+ memset(&event_ctx, 0, sizeof(struct mod_hdcp_event_context));
+ event_ctx.event = event;
+
+ /* execute and transition */
+ exec_status = execution(hdcp, &event_ctx, &hdcp->auth.trans_input);
+ trans_status = transition(
+ hdcp, &event_ctx, &hdcp->auth.trans_input, output);
+ if (trans_status == MOD_HDCP_STATUS_SUCCESS) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else if (exec_status == MOD_HDCP_STATUS_SUCCESS) {
+ status = MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE;
+ push_error_status(hdcp, status);
+ } else {
+ status = exec_status;
+ push_error_status(hdcp, status);
+ }
+
+ /* reset authentication if needed */
+ if (trans_status == MOD_HDCP_STATUS_RESET_NEEDED) {
+ HDCP_FULL_DDC_TRACE(hdcp);
+ reset_status = reset_authentication(hdcp, output);
+ if (reset_status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, reset_status);
+ }
+ return status;
+}
+
+enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
+ enum signal_type signal)
+{
+ enum mod_hdcp_operation_mode mode = MOD_HDCP_MODE_OFF;
+
+ switch (signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ mode = MOD_HDCP_MODE_DEFAULT;
+ break;
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ mode = MOD_HDCP_MODE_DP;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ mode = MOD_HDCP_MODE_DP_MST;
+ break;
+ default:
+ break;
+ };
+
+ return mode;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
new file mode 100644
index 000000000000..5664bc0b5bd0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -0,0 +1,442 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef HDCP_H_
+#define HDCP_H_
+
+#include "mod_hdcp.h"
+#include "hdcp_log.h"
+
+#define BCAPS_READY_MASK 0x20
+#define BCAPS_REPEATER_MASK 0x40
+#define BSTATUS_DEVICE_COUNT_MASK 0X007F
+#define BSTATUS_MAX_DEVS_EXCEEDED_MASK 0x0080
+#define BSTATUS_MAX_CASCADE_EXCEEDED_MASK 0x0800
+#define BCAPS_HDCP_CAPABLE_MASK_DP 0x01
+#define BCAPS_REPEATER_MASK_DP 0x02
+#define BSTATUS_READY_MASK_DP 0x01
+#define BSTATUS_R0_P_AVAILABLE_MASK_DP 0x02
+#define BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP 0x04
+#define BSTATUS_REAUTH_REQUEST_MASK_DP 0x08
+#define BINFO_DEVICE_COUNT_MASK_DP 0X007F
+#define BINFO_MAX_DEVS_EXCEEDED_MASK_DP 0x0080
+#define BINFO_MAX_CASCADE_EXCEEDED_MASK_DP 0x0800
+
+#define RXSTATUS_MSG_SIZE_MASK 0x03FF
+#define RXSTATUS_READY_MASK 0x0400
+#define RXSTATUS_REAUTH_REQUEST_MASK 0x0800
+#define RXIDLIST_DEVICE_COUNT_LOWER_MASK 0xf0
+#define RXIDLIST_DEVICE_COUNT_UPPER_MASK 0x01
+#define RXCAPS_BYTE0_HDCP_CAPABLE_MASK_DP 0x02
+#define RXSTATUS_READY_MASK_DP 0x0001
+#define RXSTATUS_H_P_AVAILABLE_MASK_DP 0x0002
+#define RXSTATUS_PAIRING_AVAILABLE_MASK_DP 0x0004
+#define RXSTATUS_REAUTH_REQUEST_MASK_DP 0x0008
+#define RXSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP 0x0010
+
+enum mod_hdcp_trans_input_result {
+ UNKNOWN = 0,
+ PASS,
+ FAIL
+};
+
+struct mod_hdcp_transition_input_hdcp1 {
+ uint8_t bksv_read;
+ uint8_t bksv_validation;
+ uint8_t add_topology;
+ uint8_t create_session;
+ uint8_t an_write;
+ uint8_t aksv_write;
+ uint8_t ainfo_write;
+ uint8_t bcaps_read;
+ uint8_t r0p_read;
+ uint8_t rx_validation;
+ uint8_t encryption;
+ uint8_t link_maintenance;
+ uint8_t ready_check;
+ uint8_t bstatus_read;
+ uint8_t max_cascade_check;
+ uint8_t max_devs_check;
+ uint8_t device_count_check;
+ uint8_t ksvlist_read;
+ uint8_t vp_read;
+ uint8_t ksvlist_vp_validation;
+
+ uint8_t hdcp_capable_dp;
+ uint8_t binfo_read_dp;
+ uint8_t r0p_available_dp;
+ uint8_t link_integrity_check;
+ uint8_t reauth_request_check;
+ uint8_t stream_encryption_dp;
+};
+
+union mod_hdcp_transition_input {
+ struct mod_hdcp_transition_input_hdcp1 hdcp1;
+};
+
+struct mod_hdcp_message_hdcp1 {
+ uint8_t an[8];
+ uint8_t aksv[5];
+ uint8_t ainfo;
+ uint8_t bksv[5];
+ uint16_t r0p;
+ uint8_t bcaps;
+ uint16_t bstatus;
+ uint8_t ksvlist[635];
+ uint16_t ksvlist_size;
+ uint8_t vp[20];
+
+ uint16_t binfo_dp;
+};
+
+union mod_hdcp_message {
+ struct mod_hdcp_message_hdcp1 hdcp1;
+};
+
+struct mod_hdcp_auth_counters {
+ uint8_t stream_management_retry_count;
+};
+
+/* contains values per connection */
+struct mod_hdcp_connection {
+ struct mod_hdcp_link link;
+ struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS];
+ uint8_t is_repeater;
+ uint8_t is_km_stored;
+ struct mod_hdcp_trace trace;
+ uint8_t hdcp1_retry_count;
+};
+
+/* contains values per authentication cycle */
+struct mod_hdcp_authentication {
+ uint32_t id;
+ union mod_hdcp_message msg;
+ union mod_hdcp_transition_input trans_input;
+ struct mod_hdcp_auth_counters count;
+};
+
+/* contains values per state change */
+struct mod_hdcp_state {
+ uint8_t id;
+ uint32_t stay_count;
+};
+
+/* per event in a state */
+struct mod_hdcp_event_context {
+ enum mod_hdcp_event event;
+ uint8_t rx_id_list_ready;
+ uint8_t unexpected_event;
+};
+
+struct mod_hdcp {
+ /* per link */
+ struct mod_hdcp_config config;
+ /* per connection */
+ struct mod_hdcp_connection connection;
+ /* per authentication attempt */
+ struct mod_hdcp_authentication auth;
+ /* per state in an authentication */
+ struct mod_hdcp_state state;
+ /* reserved memory buffer */
+ uint8_t buf[2025];
+};
+
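+/*
+ * State ids are globally unique: each enum below starts at the previous
+ * enum's *_STATE_END value, so the ranges never overlap and helpers like
+ * is_in_hdcp1_states() can test membership with simple range comparisons.
+ */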
+enum mod_hdcp_initial_state_id {
+ HDCP_UNINITIALIZED = 0x0,
+ HDCP_INITIAL_STATE_START = HDCP_UNINITIALIZED,
+ HDCP_INITIALIZED,
+ HDCP_CP_NOT_DESIRED,
+ HDCP_INITIAL_STATE_END = HDCP_CP_NOT_DESIRED
+};
+
+enum mod_hdcp_hdcp1_state_id {
+ HDCP1_STATE_START = HDCP_INITIAL_STATE_END,
+ H1_A0_WAIT_FOR_ACTIVE_RX,
+ H1_A1_EXCHANGE_KSVS,
+ H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER,
+ H1_A45_AUTHENTICATED,
+ H1_A8_WAIT_FOR_READY,
+ H1_A9_READ_KSV_LIST,
+ HDCP1_STATE_END = H1_A9_READ_KSV_LIST
+};
+
+enum mod_hdcp_hdcp1_dp_state_id {
+ HDCP1_DP_STATE_START = HDCP1_STATE_END,
+ D1_A0_DETERMINE_RX_HDCP_CAPABLE,
+ D1_A1_EXCHANGE_KSVS,
+ D1_A23_WAIT_FOR_R0_PRIME,
+ D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER,
+ D1_A4_AUTHENTICATED,
+ D1_A6_WAIT_FOR_READY,
+ D1_A7_READ_KSV_LIST,
+ HDCP1_DP_STATE_END = D1_A7_READ_KSV_LIST,
+};
+
+/* hdcp1 executions and transitions */
+typedef enum mod_hdcp_status (*mod_hdcp_action)(struct mod_hdcp *hdcp);
+uint8_t mod_hdcp_execute_and_set(
+ mod_hdcp_action func, uint8_t *flag,
+ enum mod_hdcp_status *status, struct mod_hdcp *hdcp, char *str);
+enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input);
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input);
+enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input,
+ struct mod_hdcp_output *output);
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input,
+ struct mod_hdcp_output *output);
+
+/* log functions */
+void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
+ uint8_t *buf, uint32_t buf_size);
+/* TODO: add adjustment log */
+
+/* psp functions */
+enum mod_hdcp_status mod_hdcp_add_display_topology(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_remove_display_topology(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
+ enum mod_hdcp_encryption_status *encryption_status);
+/* ddc functions */
+enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_bstatus(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_r0p(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_ksvlist(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_vp(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_rxcaps(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_ake_init(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_no_stored_km(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_stored_km(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_lc_init(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_eks(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp);
+
+/* hdcp version helpers */
+static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
+{
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP ||
+ hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+}
+
+static inline uint8_t is_dp_mst_hdcp(struct mod_hdcp *hdcp)
+{
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+}
+
+static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp)
+{
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DEFAULT);
+}
+
+/* hdcp state helpers */
+static inline uint8_t current_state(struct mod_hdcp *hdcp)
+{
+ return hdcp->state.id;
+}
+
+static inline void set_state_id(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output, uint8_t id)
+{
+ memset(&hdcp->state, 0, sizeof(hdcp->state));
+ hdcp->state.id = id;
+ /* callback timer should be reset per state */
+ output->callback_stop = 1;
+ output->watchdog_timer_stop = 1;
+ HDCP_NEXT_STATE_TRACE(hdcp, id, output);
+}
+
+static inline uint8_t is_in_hdcp1_states(struct mod_hdcp *hdcp)
+{
+ return (current_state(hdcp) > HDCP1_STATE_START &&
+ current_state(hdcp) <= HDCP1_STATE_END);
+}
+
+static inline uint8_t is_in_hdcp1_dp_states(struct mod_hdcp *hdcp)
+{
+ return (current_state(hdcp) > HDCP1_DP_STATE_START &&
+ current_state(hdcp) <= HDCP1_DP_STATE_END);
+}
+
+static inline uint8_t is_hdcp1(struct mod_hdcp *hdcp)
+{
+ return (is_in_hdcp1_states(hdcp) || is_in_hdcp1_dp_states(hdcp));
+}
+
+static inline uint8_t is_in_cp_not_desired_state(struct mod_hdcp *hdcp)
+{
+ return current_state(hdcp) == HDCP_CP_NOT_DESIRED;
+}
+
+static inline uint8_t is_in_initialized_state(struct mod_hdcp *hdcp)
+{
+ return current_state(hdcp) == HDCP_INITIALIZED;
+}
+
+/* transition operation helpers */
+static inline void increment_stay_counter(struct mod_hdcp *hdcp)
+{
+ hdcp->state.stay_count++;
+}
+
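+/*
+ * Disarms the watchdog and schedules a retry callback after @time ms;
+ * MOD_HDCP_STATUS_RESET_NEEDED signals the caller to reset
+ * authentication before the retry.
+ */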
+static inline void fail_and_restart_in_ms(uint16_t time,
+ enum mod_hdcp_status *status,
+ struct mod_hdcp_output *output)
+{
+ output->callback_needed = 1;
+ output->callback_delay = time;
+ output->watchdog_timer_needed = 0;
+ output->watchdog_timer_delay = 0;
+ *status = MOD_HDCP_STATUS_RESET_NEEDED;
+}
+
+static inline void callback_in_ms(uint16_t time, struct mod_hdcp_output *output)
+{
+ output->callback_needed = 1;
+ output->callback_delay = time;
+}
+
+static inline void set_watchdog_in_ms(struct mod_hdcp *hdcp, uint16_t time,
+ struct mod_hdcp_output *output)
+{
+ output->watchdog_timer_needed = 1;
+ output->watchdog_timer_delay = time;
+}
+
+/* connection topology helpers */
+static inline uint8_t is_display_active(struct mod_hdcp_display *display)
+{
+ return display->state >= MOD_HDCP_DISPLAY_ACTIVE;
+}
+
+static inline uint8_t is_display_added(struct mod_hdcp_display *display)
+{
+ return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
+}
+
+static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display)
+{
+ return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+}
+
+static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp)
+{
+ uint8_t added_count = 0;
+ uint8_t i;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_active(&hdcp->connection.displays[i]))
+ added_count++;
+ return added_count;
+}
+
+static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp)
+{
+ uint8_t added_count = 0;
+ uint8_t i;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_added(&hdcp->connection.displays[i]))
+ added_count++;
+ return added_count;
+}
+
+static inline struct mod_hdcp_display *get_first_added_display(
+ struct mod_hdcp *hdcp)
+{
+ uint8_t i;
+ struct mod_hdcp_display *display = NULL;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_added(&hdcp->connection.displays[i])) {
+ display = &hdcp->connection.displays[i];
+ break;
+ }
+ return display;
+}
+
+static inline struct mod_hdcp_display *get_active_display_at_index(
+ struct mod_hdcp *hdcp, uint8_t index)
+{
+ uint8_t i;
+ struct mod_hdcp_display *display = NULL;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (hdcp->connection.displays[i].index == index &&
+ is_display_active(&hdcp->connection.displays[i])) {
+ display = &hdcp->connection.displays[i];
+ break;
+ }
+ return display;
+}
+
+static inline struct mod_hdcp_display *get_empty_display_container(
+ struct mod_hdcp *hdcp)
+{
+ uint8_t i;
+ struct mod_hdcp_display *display = NULL;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (!is_display_active(&hdcp->connection.displays[i])) {
+ display = &hdcp->connection.displays[i];
+ break;
+ }
+ return display;
+}
+
+static inline void reset_retry_counts(struct mod_hdcp *hdcp)
+{
+ hdcp->connection.hdcp1_retry_count = 0;
+}
+
+#endif /* HDCP_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
new file mode 100644
index 000000000000..3db4a7da414f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
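+/*
+ * A valid HDCP 1.4 KSV contains exactly 20 ones among its 40 bits.
+ * The loop below counts set bits with Kernighan's method: each
+ * n &= (n - 1) clears the lowest set bit.
+ */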
+static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
+{
+ uint64_t n = 0;
+ uint8_t count = 0;
+
+ /* copy the 5 bksv bytes to avoid an unaligned 8-byte over-read */
+ memcpy(&n, hdcp->auth.msg.hdcp1.bksv, sizeof(hdcp->auth.msg.hdcp1.bksv));
+
+ while (n) {
+ count++;
+ n &= (n - 1);
+ }
+ return (count == 20) ? MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_INVALID_BKSV;
+}
+
+static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp)
+{
+ if (is_dp_hdcp(hdcp))
+ return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_READY_MASK_DP) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY;
+ return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_READY_MASK) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY;
+}
+
+static inline enum mod_hdcp_status check_hdcp_capable_dp(struct mod_hdcp *hdcp)
+{
+ return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_HDCP_CAPABLE_MASK_DP) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE;
+}
+
+static inline enum mod_hdcp_status check_r0p_available_dp(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp)) {
+ status = (hdcp->auth.msg.hdcp1.bstatus &
+ BSTATUS_R0_P_AVAILABLE_MASK_DP) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING;
+ } else {
+ status = MOD_HDCP_STATUS_INVALID_OPERATION;
+ }
+ return status;
+}
+
+static inline enum mod_hdcp_status check_link_integrity_dp(
+ struct mod_hdcp *hdcp)
+{
+ return (hdcp->auth.msg.hdcp1.bstatus &
+ BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP) ?
+ MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+}
+
+static inline enum mod_hdcp_status check_no_reauthentication_request_dp(
+ struct mod_hdcp *hdcp)
+{
+ return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_REAUTH_REQUEST_MASK_DP) ?
+ MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED :
+ MOD_HDCP_STATUS_SUCCESS;
+}
+
+static inline enum mod_hdcp_status check_no_max_cascade(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = (hdcp->auth.msg.hdcp1.binfo_dp &
+ BINFO_MAX_CASCADE_EXCEEDED_MASK_DP) ?
+ MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+ else
+ status = (hdcp->auth.msg.hdcp1.bstatus &
+ BSTATUS_MAX_CASCADE_EXCEEDED_MASK) ?
+ MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+ return status;
+}
+
+static inline enum mod_hdcp_status check_no_max_devs(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = (hdcp->auth.msg.hdcp1.binfo_dp &
+ BINFO_MAX_DEVS_EXCEEDED_MASK_DP) ?
+ MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+ else
+ status = (hdcp->auth.msg.hdcp1.bstatus &
+ BSTATUS_MAX_DEVS_EXCEEDED_MASK) ?
+ MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+ return status;
+}
+
+static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
+{
+ return is_dp_hdcp(hdcp) ?
+ (hdcp->auth.msg.hdcp1.binfo_dp & BINFO_DEVICE_COUNT_MASK_DP) :
+ (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_DEVICE_COUNT_MASK);
+}
+
+static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
+{
+ /* device count must be greater than or equal to tracked hdcp displays */
+ return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+ MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+}
+
+static enum mod_hdcp_status wait_for_active_rx(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bksv,
+ &input->bksv_read, &status,
+ hdcp, "bksv_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps,
+ &input->bcaps_read, &status,
+ hdcp, "bcaps_read"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status exchange_ksvs(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology,
+ &input->add_topology, &status,
+ hdcp, "add_topology"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_create_session,
+ &input->create_session, &status,
+ hdcp, "create_session"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_an,
+ &input->an_write, &status,
+ hdcp, "an_write"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_aksv,
+ &input->aksv_write, &status,
+ hdcp, "aksv_write"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bksv,
+ &input->bksv_read, &status,
+ hdcp, "bksv_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(validate_bksv,
+ &input->bksv_validation, &status,
+ hdcp, "bksv_validation"))
+ goto out;
+ if (hdcp->auth.msg.hdcp1.ainfo) {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_ainfo,
+ &input->ainfo_write, &status,
+ hdcp, "ainfo_write"))
+ goto out;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status computations_validate_rx_test_for_repeater(
+ struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_r0p,
+ &input->r0p_read, &status,
+ hdcp, "r0p_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_validate_rx,
+ &input->rx_validation, &status,
+ hdcp, "rx_validation"))
+ goto out;
+ if (hdcp->connection.is_repeater) {
+ if (!hdcp->connection.link.adjust.hdcp1.postpone_encryption)
+ if (!mod_hdcp_execute_and_set(
+ mod_hdcp_hdcp1_enable_encryption,
+ &input->encryption, &status,
+ hdcp, "encryption"))
+ goto out;
+ } else {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_enable_encryption,
+ &input->encryption, &status,
+ hdcp, "encryption"))
+ goto out;
+ if (is_dp_mst_hdcp(hdcp))
+ if (!mod_hdcp_execute_and_set(
+ mod_hdcp_hdcp1_enable_dp_stream_encryption,
+ &input->stream_encryption_dp, &status,
+ hdcp, "stream_encryption_dp"))
+ goto out;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_link_maintenance,
+ &input->link_maintenance, &status,
+ hdcp, "link_maintenance"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status wait_for_ready(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (is_dp_hdcp(hdcp)) {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
+ &input->link_integrity_check, &status,
+ hdcp, "link_integrity_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+ &input->reauth_request_check, &status,
+ hdcp, "reauth_request_check"))
+ goto out;
+ } else {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps,
+ &input->bcaps_read, &status,
+ hdcp, "bcaps_read"))
+ goto out;
+ }
+ if (!mod_hdcp_execute_and_set(check_ksv_ready,
+ &input->ready_check, &status,
+ hdcp, "ready_check"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status read_ksv_list(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ uint8_t device_count;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (is_dp_hdcp(hdcp)) {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_binfo,
+ &input->binfo_read_dp, &status,
+ hdcp, "binfo_read_dp"))
+ goto out;
+ } else {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ }
+ if (!mod_hdcp_execute_and_set(check_no_max_cascade,
+ &input->max_cascade_check, &status,
+ hdcp, "max_cascade_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_no_max_devs,
+ &input->max_devs_check, &status,
+ hdcp, "max_devs_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_device_count,
+ &input->device_count_check, &status,
+ hdcp, "device_count_check"))
+ goto out;
+ device_count = get_device_count(hdcp);
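+ /* each KSV in the list is 5 bytes */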
+ hdcp->auth.msg.hdcp1.ksvlist_size = device_count * 5;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_ksvlist,
+ &input->ksvlist_read, &status,
+ hdcp, "ksvlist_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_vp,
+ &input->vp_read, &status,
+ hdcp, "vp_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_validate_ksvlist_vp,
+ &input->ksvlist_vp_validation, &status,
+ hdcp, "ksvlist_vp_validation"))
+ goto out;
+ if (input->encryption != PASS)
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_enable_encryption,
+ &input->encryption, &status,
+ hdcp, "encryption"))
+ goto out;
+ if (is_dp_mst_hdcp(hdcp))
+ if (!mod_hdcp_execute_and_set(
+ mod_hdcp_hdcp1_enable_dp_stream_encryption,
+ &input->stream_encryption_dp, &status,
+ hdcp, "stream_encryption_dp"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status determine_rx_hdcp_capable_dp(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps,
+ &input->bcaps_read, &status,
+ hdcp, "bcaps_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_hdcp_capable_dp,
+ &input->hdcp_capable_dp, &status,
+ hdcp, "hdcp_capable_dp"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status wait_for_r0_prime_dp(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_r0p_available_dp,
+ &input->r0p_available_dp, &status,
+ hdcp, "r0p_available_dp"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
+ &input->link_integrity_check, &status,
+ hdcp, "link_integrity_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+ &input->reauth_request_check, &status,
+ hdcp, "reauth_request_check"))
+ goto out;
+out:
+ return status;
+}
+
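+/*
+ * Runs one action and latches its result into the matching transition
+ * input flag, tracing only when the flag changes between PASS and FAIL.
+ * The boolean return lets callers chain actions and bail out on the
+ * first failure.
+ */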
+uint8_t mod_hdcp_execute_and_set(
+ mod_hdcp_action func, uint8_t *flag,
+ enum mod_hdcp_status *status, struct mod_hdcp *hdcp, char *str)
+{
+ *status = func(hdcp);
+ if (*status == MOD_HDCP_STATUS_SUCCESS && *flag != PASS) {
+ HDCP_INPUT_PASS_TRACE(hdcp, str);
+ *flag = PASS;
+ } else if (*status != MOD_HDCP_STATUS_SUCCESS && *flag != FAIL) {
+ HDCP_INPUT_FAIL_TRACE(hdcp, str);
+ *flag = FAIL;
+ }
+ return (*status == MOD_HDCP_STATUS_SUCCESS);
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ switch (current_state(hdcp)) {
+ case H1_A0_WAIT_FOR_ACTIVE_RX:
+ status = wait_for_active_rx(hdcp, event_ctx, input);
+ break;
+ case H1_A1_EXCHANGE_KSVS:
+ status = exchange_ksvs(hdcp, event_ctx, input);
+ break;
+ case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
+ status = computations_validate_rx_test_for_repeater(hdcp,
+ event_ctx, input);
+ break;
+ case H1_A45_AUTHENTICATED:
+ status = authenticated(hdcp, event_ctx, input);
+ break;
+ case H1_A8_WAIT_FOR_READY:
+ status = wait_for_ready(hdcp, event_ctx, input);
+ break;
+ case H1_A9_READ_KSV_LIST:
+ status = read_ksv_list(hdcp, event_ctx, input);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ break;
+ }
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ switch (current_state(hdcp)) {
+ case D1_A0_DETERMINE_RX_HDCP_CAPABLE:
+ status = determine_rx_hdcp_capable_dp(hdcp, event_ctx, input);
+ break;
+ case D1_A1_EXCHANGE_KSVS:
+ status = exchange_ksvs(hdcp, event_ctx, input);
+ break;
+ case D1_A23_WAIT_FOR_R0_PRIME:
+ status = wait_for_r0_prime_dp(hdcp, event_ctx, input);
+ break;
+ case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER:
+ status = computations_validate_rx_test_for_repeater(
+ hdcp, event_ctx, input);
+ break;
+ case D1_A4_AUTHENTICATED:
+ status = authenticated_dp(hdcp, event_ctx, input);
+ break;
+ case D1_A6_WAIT_FOR_READY:
+ status = wait_for_ready(hdcp, event_ctx, input);
+ break;
+ case D1_A7_READ_KSV_LIST:
+ status = read_ksv_list(hdcp, event_ctx, input);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ break;
+ }
+
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
new file mode 100644
index 000000000000..136b8011ff3f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
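+/*
+ * The "1A-xx"/"1B-xx" comments below refer to test ids in the HDCP 1.4
+ * compliance test specification.
+ */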
+enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_connection *conn = &hdcp->connection;
+ struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust;
+
+ switch (current_state(hdcp)) {
+ case H1_A0_WAIT_FOR_ACTIVE_RX:
+ if (input->bksv_read != PASS || input->bcaps_read != PASS) {
+ /* 1A-04: retry repeatedly on port access failure */
+ callback_in_ms(500, output);
+ increment_stay_counter(hdcp);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A1_EXCHANGE_KSVS);
+ break;
+ case H1_A1_EXCHANGE_KSVS:
+ if (input->add_topology != PASS ||
+ input->create_session != PASS) {
+ /* out of sync with psp state */
+ adjust->hdcp1.disable = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->an_write != PASS ||
+ input->aksv_write != PASS ||
+ input->bksv_read != PASS ||
+ input->bksv_validation != PASS ||
+ input->ainfo_write == FAIL) {
+ /* 1A-05: consider invalid bksv a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(300, output);
+ set_state_id(hdcp, output,
+ H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER);
+ break;
+ case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
+ if (input->bcaps_read != PASS ||
+ input->r0p_read != PASS ||
+ input->rx_validation != PASS ||
+ (!conn->is_repeater && input->encryption != PASS)) {
+ /* 1A-06: consider invalid r0' a failure */
+ /* 1A-08: consider bksv listed in SRM a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (conn->is_repeater) {
+ callback_in_ms(0, output);
+ set_watchdog_in_ms(hdcp, 5000, output);
+ set_state_id(hdcp, output, H1_A8_WAIT_FOR_READY);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A45_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ }
+ break;
+ case H1_A45_AUTHENTICATED:
+ if (input->link_maintenance != PASS) {
+ /* 1A-07: consider invalid ri' a failure */
+ /* 1A-07a: consider read ri' not returned a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(500, output);
+ increment_stay_counter(hdcp);
+ break;
+ case H1_A8_WAIT_FOR_READY:
+ if (input->ready_check != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1B-03: fail hdcp on ksv list READY timeout */
+ /* prevent black screen in next attempt */
+ adjust->hdcp1.postpone_encryption = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ } else {
+ /* continue ksv list READY polling */
+ callback_in_ms(500, output);
+ increment_stay_counter(hdcp);
+ }
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A9_READ_KSV_LIST);
+ break;
+ case H1_A9_READ_KSV_LIST:
+ if (input->bstatus_read != PASS ||
+ input->max_cascade_check != PASS ||
+ input->max_devs_check != PASS ||
+ input->device_count_check != PASS ||
+ input->ksvlist_read != PASS ||
+ input->vp_read != PASS ||
+ input->ksvlist_vp_validation != PASS ||
+ input->encryption != PASS) {
+ /* 1B-06: consider MAX_CASCADE_EXCEEDED a failure */
+ /* 1B-05: consider MAX_DEVS_EXCEEDED a failure */
+ /* 1B-04: consider invalid v' a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A45_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_connection *conn = &hdcp->connection;
+ struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust;
+
+ switch (current_state(hdcp)) {
+ case D1_A0_DETERMINE_RX_HDCP_CAPABLE:
+ if (input->bcaps_read != PASS) {
+ /* 1A-04: no authentication on bcaps read failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->hdcp_capable_dp != PASS) {
+ adjust->hdcp1.disable = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D1_A1_EXCHANGE_KSVS);
+ break;
+ case D1_A1_EXCHANGE_KSVS:
+ if (input->add_topology != PASS ||
+ input->create_session != PASS) {
+ /* out of sync with psp state */
+ adjust->hdcp1.disable = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->an_write != PASS ||
+ input->aksv_write != PASS ||
+ input->bksv_read != PASS ||
+ input->bksv_validation != PASS ||
+ input->ainfo_write == FAIL) {
+ /* 1A-05: consider invalid bksv a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_watchdog_in_ms(hdcp, 100, output);
+ set_state_id(hdcp, output, D1_A23_WAIT_FOR_R0_PRIME);
+ break;
+ case D1_A23_WAIT_FOR_R0_PRIME:
+ if (input->bstatus_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->r0p_available_dp != PASS) {
+ if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
+ fail_and_restart_in_ms(0, &status, output);
+ else
+ increment_stay_counter(hdcp);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER);
+ break;
+ case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER:
+ if (input->r0p_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->rx_validation != PASS) {
+ if (hdcp->state.stay_count < 2) {
+ /* allow 2 additional retries */
+ callback_in_ms(0, output);
+ increment_stay_counter(hdcp);
+ } else {
+ /*
+ * 1A-06: consider invalid r0' a failure
+ * after 3 attempts.
+ * 1A-08: consider bksv listed in SRM a failure
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ }
+ break;
+ } else if ((!conn->is_repeater && input->encryption != PASS) ||
+ (!conn->is_repeater && is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (conn->is_repeater) {
+ set_watchdog_in_ms(hdcp, 5000, output);
+ set_state_id(hdcp, output, D1_A6_WAIT_FOR_READY);
+ } else {
+ set_state_id(hdcp, output, D1_A4_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ }
+ break;
+ case D1_A4_AUTHENTICATED:
+ if (input->link_integrity_check != PASS ||
+ input->reauth_request_check != PASS) {
+ /* 1A-07: restart hdcp on a link integrity failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ break;
+ case D1_A6_WAIT_FOR_READY:
+ if (input->link_integrity_check == FAIL ||
+ input->reauth_request_check == FAIL) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->ready_check != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1B-04: fail hdcp on ksv list READY timeout */
+ /* prevent black screen in next attempt */
+ adjust->hdcp1.postpone_encryption = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ } else {
+ increment_stay_counter(hdcp);
+ }
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D1_A7_READ_KSV_LIST);
+ break;
+ case D1_A7_READ_KSV_LIST:
+ if (input->binfo_read_dp != PASS ||
+ input->max_cascade_check != PASS ||
+ input->max_devs_check != PASS) {
+ /* 1B-06: consider MAX_DEVS_EXCEEDED a failure */
+ /* 1B-07: consider MAX_CASCADE_EXCEEDED a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->device_count_check != PASS) {
+ /*
+ * Some slow dongles don't update the device count
+ * as soon as a downstream device is connected.
+ * Give them more time to react.
+ */
+ adjust->hdcp1.postpone_encryption = 1;
+ fail_and_restart_in_ms(1000, &status, output);
+ break;
+ } else if (input->ksvlist_read != PASS ||
+ input->vp_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->ksvlist_vp_validation != PASS) {
+ if (hdcp->state.stay_count < 2) {
+ /* allow 2 additional retries */
+ callback_in_ms(0, output);
+ increment_stay_counter(hdcp);
+ } else {
+ /*
+ * 1B-05: consider invalid v' a failure
+ * after 3 attempts.
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ }
+ break;
+ } else if (input->encryption != PASS ||
+ (is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_state_id(hdcp, output, D1_A4_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ break;
+ default:
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
new file mode 100644
index 000000000000..e7baae059b85
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define HDCP_I2C_ADDR 0x3a /* 0x74 >> 1 */
+#define KSV_READ_SIZE 0xf /* 0x6803b - 0x6802c */
+#define HDCP_MAX_AUX_TRANSACTION_SIZE 16
+
+enum mod_hdcp_ddc_message_id {
+ MOD_HDCP_MESSAGE_ID_INVALID = -1,
+
+ /* HDCP 1.4 */
+
+ MOD_HDCP_MESSAGE_ID_READ_BKSV = 0,
+ MOD_HDCP_MESSAGE_ID_READ_RI_R0,
+ MOD_HDCP_MESSAGE_ID_WRITE_AKSV,
+ MOD_HDCP_MESSAGE_ID_WRITE_AINFO,
+ MOD_HDCP_MESSAGE_ID_WRITE_AN,
+ MOD_HDCP_MESSAGE_ID_READ_VH_X,
+ MOD_HDCP_MESSAGE_ID_READ_VH_0,
+ MOD_HDCP_MESSAGE_ID_READ_VH_1,
+ MOD_HDCP_MESSAGE_ID_READ_VH_2,
+ MOD_HDCP_MESSAGE_ID_READ_VH_3,
+ MOD_HDCP_MESSAGE_ID_READ_VH_4,
+ MOD_HDCP_MESSAGE_ID_READ_BCAPS,
+ MOD_HDCP_MESSAGE_ID_READ_BSTATUS,
+ MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,
+ MOD_HDCP_MESSAGE_ID_READ_BINFO,
+
+ MOD_HDCP_MESSAGE_ID_MAX
+};
+
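+/* HDCP 1.4 register offsets within the receiver's I2C slave (0x74) */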
+static const uint8_t hdcp_i2c_offsets[] = {
+ [MOD_HDCP_MESSAGE_ID_READ_BKSV] = 0x0,
+ [MOD_HDCP_MESSAGE_ID_READ_RI_R0] = 0x8,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKSV] = 0x10,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AINFO] = 0x15,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AN] = 0x18,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_X] = 0x20,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_0] = 0x20,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_1] = 0x24,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_2] = 0x28,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_3] = 0x2C,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_4] = 0x30,
+ [MOD_HDCP_MESSAGE_ID_READ_BCAPS] = 0x40,
+ [MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41,
+ [MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43,
+ [MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0xFF,
+};
+
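+/* HDCP 1.4 DPCD addresses (0x68000 block) for DisplayPort receivers */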
+static const uint32_t hdcp_dpcd_addrs[] = {
+ [MOD_HDCP_MESSAGE_ID_READ_BKSV] = 0x68000,
+ [MOD_HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKSV] = 0x68007,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AINFO] = 0x6803B,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AN] = 0x6800c,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_X] = 0x68014,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_0] = 0x68014,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_1] = 0x68018,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_2] = 0x6801c,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_3] = 0x68020,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_4] = 0x68024,
+ [MOD_HDCP_MESSAGE_ID_READ_BCAPS] = 0x68028,
+ [MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029,
+ [MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c,
+ [MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a,
+};
+
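+/*
+ * Reads a message from the receiver. Over DP the read is split into
+ * DPCD transactions of at most HDCP_MAX_AUX_TRANSACTION_SIZE bytes;
+ * over HDMI/DVI it is a single I2C read at the message offset.
+ */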
+static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
+ enum mod_hdcp_ddc_message_id msg_id,
+ uint8_t *buf,
+ uint32_t buf_len)
+{
+ bool success = true;
+ uint32_t cur_size = 0;
+ uint32_t data_offset = 0;
+
+ if (is_dp_hdcp(hdcp)) {
+ while (buf_len > 0) {
+ cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+ success = hdcp->config.ddc.funcs.read_dpcd(hdcp->config.ddc.handle,
+ hdcp_dpcd_addrs[msg_id] + data_offset,
+ buf + data_offset,
+ cur_size);
+
+ if (!success)
+ break;
+
+ buf_len -= cur_size;
+ data_offset += cur_size;
+ }
+ } else {
+ success = hdcp->config.ddc.funcs.read_i2c(
+ hdcp->config.ddc.handle,
+ HDCP_I2C_ADDR,
+ hdcp_i2c_offsets[msg_id],
+ buf,
+ (uint32_t)buf_len);
+ }
+
+ return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
+}
+
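+/*
+ * Reads a message in fixed-size chunks by repeatedly calling read() at
+ * the same address. Used for the DP KSV FIFO, which exposes at most
+ * KSV_READ_SIZE bytes per transaction and advances on each read.
+ */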
+static enum mod_hdcp_status read_repeatedly(struct mod_hdcp *hdcp,
+ enum mod_hdcp_ddc_message_id msg_id,
+ uint8_t *buf,
+ uint32_t buf_len,
+ uint8_t read_size)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_DDC_FAILURE;
+ uint32_t cur_size = 0;
+ uint32_t data_offset = 0;
+
+ while (buf_len > 0) {
+ cur_size = MIN(buf_len, read_size);
+ status = read(hdcp, msg_id, buf + data_offset, cur_size);
+
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ break;
+
+ buf_len -= cur_size;
+ data_offset += cur_size;
+ }
+
+ return status;
+}
+
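+/*
+ * Writes a message to the receiver. Over DP the write is split into
+ * DPCD transactions; over I2C the register offset is prepended to the
+ * payload in hdcp->buf and sent as one transaction.
+ */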
+static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
+ enum mod_hdcp_ddc_message_id msg_id,
+ uint8_t *buf,
+ uint32_t buf_len)
+{
+ bool success = true;
+ uint32_t cur_size = 0;
+ uint32_t data_offset = 0;
+
+ if (is_dp_hdcp(hdcp)) {
+ while (buf_len > 0) {
+ cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+ success = hdcp->config.ddc.funcs.write_dpcd(
+ hdcp->config.ddc.handle,
+ hdcp_dpcd_addrs[msg_id] + data_offset,
+ buf + data_offset,
+ cur_size);
+
+ if (!success)
+ break;
+
+ buf_len -= cur_size;
+ data_offset += cur_size;
+ }
+ } else {
+ hdcp->buf[0] = hdcp_i2c_offsets[msg_id];
+ memmove(&hdcp->buf[1], buf, buf_len);
+ success = hdcp->config.ddc.funcs.write_i2c(
+ hdcp->config.ddc.handle,
+ HDCP_I2C_ADDR,
+ hdcp->buf,
+ (uint32_t)(buf_len+1));
+ }
+
+ return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
+}
+
+enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp)
+{
+ return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BKSV,
+ hdcp->auth.msg.hdcp1.bksv,
+ sizeof(hdcp->auth.msg.hdcp1.bksv));
+}
+
+enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp)
+{
+ return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BCAPS,
+ &hdcp->auth.msg.hdcp1.bcaps,
+ sizeof(hdcp->auth.msg.hdcp1.bcaps));
+}
+
+enum mod_hdcp_status mod_hdcp_read_bstatus(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BSTATUS,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus,
+ 1);
+ else
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BSTATUS,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus,
+ sizeof(hdcp->auth.msg.hdcp1.bstatus));
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_r0p(struct mod_hdcp *hdcp)
+{
+ return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RI_R0,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.r0p,
+ sizeof(hdcp->auth.msg.hdcp1.r0p));
+}
+
+/* special case, reading repeatedly at the same address, don't use read() */
+enum mod_hdcp_status mod_hdcp_read_ksvlist(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = read_repeatedly(hdcp, MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,
+ hdcp->auth.msg.hdcp1.ksvlist,
+ hdcp->auth.msg.hdcp1.ksvlist_size,
+ KSV_READ_SIZE);
+ else
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.ksvlist,
+ hdcp->auth.msg.hdcp1.ksvlist_size);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_vp(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_0,
+ &hdcp->auth.msg.hdcp1.vp[0], 4);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_1,
+ &hdcp->auth.msg.hdcp1.vp[4], 4);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_2,
+ &hdcp->auth.msg.hdcp1.vp[8], 4);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_3,
+ &hdcp->auth.msg.hdcp1.vp[12], 4);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_4,
+ &hdcp->auth.msg.hdcp1.vp[16], 4);
+out:
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BINFO,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp,
+ sizeof(hdcp->auth.msg.hdcp1.binfo_dp));
+ else
+ status = MOD_HDCP_STATUS_INVALID_OPERATION;
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp)
+{
+ return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKSV,
+ hdcp->auth.msg.hdcp1.aksv,
+ sizeof(hdcp->auth.msg.hdcp1.aksv));
+}
+
+enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp)
+{
+ return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AINFO,
+ &hdcp->auth.msg.hdcp1.ainfo,
+ sizeof(hdcp->auth.msg.hdcp1.ainfo));
+}
+
+enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp)
+{
+ return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AN,
+ hdcp->auth.msg.hdcp1.an,
+ sizeof(hdcp->auth.msg.hdcp1.an));
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
new file mode 100644
index 000000000000..3982ced5f969
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "hdcp.h"
+
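+/*
+ * Formats @msg into @buf as a hex dump: a newline every 16 bytes, then
+ * "XX " per byte. Writes nothing unless @buf can hold the whole dump.
+ */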
+void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
+ uint8_t *buf, uint32_t buf_size)
+{
+ const uint8_t bytes_per_line = 16,
+ byte_size = 3,
+ newline_size = 1,
+ terminator_size = 1;
+ uint32_t line_count = msg_size / bytes_per_line,
+ trailing_bytes = msg_size % bytes_per_line;
+ uint32_t target_size = (byte_size * bytes_per_line + newline_size) * line_count +
+ byte_size * trailing_bytes + newline_size + terminator_size;
+ uint32_t buf_pos = 0;
+ uint32_t i = 0;
+
+ if (buf_size >= target_size) {
+ for (i = 0; i < msg_size; i++) {
+ if (i % bytes_per_line == 0)
+ buf[buf_pos++] = '\n';
+ sprintf((char *)&buf[buf_pos], "%02X ", msg[i]);
+ buf_pos += byte_size;
+ }
+ buf[buf_pos++] = '\0';
+ }
+}
+
+char *mod_hdcp_status_to_str(int32_t status)
+{
+ switch (status) {
+ case MOD_HDCP_STATUS_SUCCESS:
+ return "MOD_HDCP_STATUS_SUCCESS";
+ case MOD_HDCP_STATUS_FAILURE:
+ return "MOD_HDCP_STATUS_FAILURE";
+ case MOD_HDCP_STATUS_RESET_NEEDED:
+ return "MOD_HDCP_STATUS_RESET_NEEDED";
+ case MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND:
+ return "MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND";
+ case MOD_HDCP_STATUS_DISPLAY_NOT_FOUND:
+ return "MOD_HDCP_STATUS_DISPLAY_NOT_FOUND";
+ case MOD_HDCP_STATUS_INVALID_STATE:
+ return "MOD_HDCP_STATUS_INVALID_STATE";
+ case MOD_HDCP_STATUS_NOT_IMPLEMENTED:
+ return "MOD_HDCP_STATUS_NOT_IMPLEMENTED";
+ case MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE:
+ return "MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE";
+ case MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE:
+ return "MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE";
+ case MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE:
+ return "MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE";
+ case MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE:
+ return "MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER:
+ return "MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER";
+ case MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE:
+ return "MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE";
+ case MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING:
+ return "MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING";
+ case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY:
+ return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY";
+ case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION:
+ return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION";
+ case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED:
+ return "MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED";
+ case MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_INVALID_BKSV:
+ return "MOD_HDCP_STATUS_HDCP1_INVALID_BKSV";
+ case MOD_HDCP_STATUS_DDC_FAILURE:
+ return "MOD_HDCP_STATUS_DDC_FAILURE";
+ case MOD_HDCP_STATUS_INVALID_OPERATION:
+ return "MOD_HDCP_STATUS_INVALID_OPERATION";
+ default:
+ return "MOD_HDCP_STATUS_UNKNOWN";
+ }
+}
+
+char *mod_hdcp_state_id_to_str(int32_t id)
+{
+ switch (id) {
+ case HDCP_UNINITIALIZED:
+ return "HDCP_UNINITIALIZED";
+ case HDCP_INITIALIZED:
+ return "HDCP_INITIALIZED";
+ case HDCP_CP_NOT_DESIRED:
+ return "HDCP_CP_NOT_DESIRED";
+ case H1_A0_WAIT_FOR_ACTIVE_RX:
+ return "H1_A0_WAIT_FOR_ACTIVE_RX";
+ case H1_A1_EXCHANGE_KSVS:
+ return "H1_A1_EXCHANGE_KSVS";
+ case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
+ return "H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER";
+ case H1_A45_AUTHENTICATED:
+ return "H1_A45_AUTHENTICATED";
+ case H1_A8_WAIT_FOR_READY:
+ return "H1_A8_WAIT_FOR_READY";
+ case H1_A9_READ_KSV_LIST:
+ return "H1_A9_READ_KSV_LIST";
+ case D1_A0_DETERMINE_RX_HDCP_CAPABLE:
+ return "D1_A0_DETERMINE_RX_HDCP_CAPABLE";
+ case D1_A1_EXCHANGE_KSVS:
+ return "D1_A1_EXCHANGE_KSVS";
+ case D1_A23_WAIT_FOR_R0_PRIME:
+ return "D1_A23_WAIT_FOR_R0_PRIME";
+ case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER:
+ return "D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER";
+ case D1_A4_AUTHENTICATED:
+ return "D1_A4_AUTHENTICATED";
+ case D1_A6_WAIT_FOR_READY:
+ return "D1_A6_WAIT_FOR_READY";
+ case D1_A7_READ_KSV_LIST:
+ return "D1_A7_READ_KSV_LIST";
+ default:
+ return "UNKNOWN_STATE_ID";
+ }
+}
+
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
new file mode 100644
index 000000000000..2fd0e0a893ef
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MOD_HDCP_LOG_H_
+#define MOD_HDCP_LOG_H_
+
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#define HDCP_LOG_ERR(hdcp, ...) DRM_ERROR(__VA_ARGS__)
+#define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
+#define HDCP_LOG_DDC(hdcp, ...) pr_debug("[HDCP_DDC]:"__VA_ARGS__)
+#endif
+
+/* default logs */
+#define HDCP_ERROR_TRACE(hdcp, status) \
+ HDCP_LOG_ERR(hdcp, \
+ "[Link %d] ERROR %s IN STATE %s", \
+ hdcp->config.index, \
+ mod_hdcp_status_to_str(status), \
+ mod_hdcp_state_id_to_str(hdcp->state.id))
+#define HDCP_HDCP1_ENABLED_TRACE(hdcp, displayIndex) \
+ HDCP_LOG_VER(hdcp, \
+ "[Link %d] HDCP 1.4 enabled on display %d", \
+ hdcp->config.index, displayIndex)
+/* state machine logs */
+#define HDCP_REMOVE_DISPLAY_TRACE(hdcp, displayIndex) \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d] HDCP_REMOVE_DISPLAY index %d", \
+ hdcp->config.index, displayIndex)
+#define HDCP_INPUT_PASS_TRACE(hdcp, str) \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d]\tPASS %s", \
+ hdcp->config.index, str)
+#define HDCP_INPUT_FAIL_TRACE(hdcp, str) \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d]\tFAIL %s", \
+ hdcp->config.index, str)
+#define HDCP_NEXT_STATE_TRACE(hdcp, id, output) do { \
+ if (output->watchdog_timer_needed) \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d] > %s with %d ms watchdog", \
+ hdcp->config.index, \
+ mod_hdcp_state_id_to_str(id), output->watchdog_timer_delay); \
+ else \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d] > %s", hdcp->config.index, \
+ mod_hdcp_state_id_to_str(id)); \
+} while (0)
+#define HDCP_TIMEOUT_TRACE(hdcp) \
+ HDCP_LOG_FSM(hdcp, "[Link %d] --> TIMEOUT", hdcp->config.index)
+#define HDCP_CPIRQ_TRACE(hdcp) \
+ HDCP_LOG_FSM(hdcp, "[Link %d] --> CPIRQ", hdcp->config.index)
+#define HDCP_EVENT_TRACE(hdcp, event) do { \
+ if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \
+ HDCP_TIMEOUT_TRACE(hdcp); \
+ else if (event == MOD_HDCP_EVENT_CPIRQ) \
+ HDCP_CPIRQ_TRACE(hdcp); \
+} while (0)
+/* TODO: find some way to tell if logging is off to save time */
+#define HDCP_DDC_READ_TRACE(hdcp, msg_name, msg, msg_size) do { \
+ mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \
+ sizeof(hdcp->buf)); \
+ HDCP_LOG_DDC(hdcp, "[Link %d] Read %s%s", hdcp->config.index, \
+ msg_name, hdcp->buf); \
+} while (0)
+#define HDCP_DDC_WRITE_TRACE(hdcp, msg_name, msg, msg_size) do { \
+ mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \
+ sizeof(hdcp->buf)); \
+ HDCP_LOG_DDC(hdcp, "[Link %d] Write %s%s", \
+ hdcp->config.index, msg_name,\
+ hdcp->buf); \
+} while (0)
+#define HDCP_FULL_DDC_TRACE(hdcp) do { \
+ HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \
+ sizeof(hdcp->auth.msg.hdcp1.bksv)); \
+ HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \
+ sizeof(hdcp->auth.msg.hdcp1.bcaps)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \
+ sizeof(hdcp->auth.msg.hdcp1.an)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \
+ sizeof(hdcp->auth.msg.hdcp1.aksv)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \
+ sizeof(hdcp->auth.msg.hdcp1.ainfo)); \
+ HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \
+ (uint8_t *)&hdcp->auth.msg.hdcp1.r0p, \
+ sizeof(hdcp->auth.msg.hdcp1.r0p)); \
+ HDCP_DDC_READ_TRACE(hdcp, "BINFO", \
+ (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \
+ sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \
+ HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \
+ hdcp->auth.msg.hdcp1.ksvlist_size); \
+ HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \
+ sizeof(hdcp->auth.msg.hdcp1.vp)); \
+} while (0)
+#define HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, i) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\tadd display %d", \
+ hdcp->config.index, i)
+#define HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, i) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\tremove display %d", \
+ hdcp->config.index, i)
+#define HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\tdestroy hdcp1 session", \
+ hdcp->config.index)
+#define HDCP_TOP_RESET_AUTH_TRACE(hdcp) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\treset authentication", hdcp->config.index)
+#define HDCP_TOP_RESET_CONN_TRACE(hdcp) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\treset connection", hdcp->config.index)
+#define HDCP_TOP_INTERFACE_TRACE(hdcp) do { \
+ HDCP_LOG_TOP(hdcp, "\n"); \
+ HDCP_LOG_TOP(hdcp, "[Link %d] %s", hdcp->config.index, __func__); \
+} while (0)
+#define HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, i) do { \
+ HDCP_LOG_TOP(hdcp, "\n"); \
+ HDCP_LOG_TOP(hdcp, "[Link %d] %s display %d", hdcp->config.index, __func__, i); \
+} while (0)
+
+#endif // MOD_HDCP_LOG_H_
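
As a usage sketch of the macros above (hedged: log_hdcp_event() is a
hypothetical caller, not part of this patch; it assumes hdcp.h provides
struct mod_hdcp and the event/status enums, and that
CONFIG_DRM_AMD_DC_HDCP is set):

/* Sketch only: shows how the FSM trace macros compose in a caller. */
static void log_hdcp_event(struct mod_hdcp *hdcp,
		enum mod_hdcp_event event,
		enum mod_hdcp_status status)
{
	/* logs "--> TIMEOUT" or "--> CPIRQ" depending on the event */
	HDCP_EVENT_TRACE(hdcp, event);

	/* logs the failing status together with the current state id */
	if (status != MOD_HDCP_STATUS_SUCCESS)
		HDCP_ERROR_TRACE(hdcp, status);
}
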
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
new file mode 100644
index 000000000000..646d909bbc37
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#define MAX_NUM_DISPLAYS 24
+
+#include "hdcp.h"
+
+#include "amdgpu.h"
+#include "hdcp_psp.h"
+
+enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_dtm_shared_memory *dtm_cmd;
+ struct mod_hdcp_display *display = NULL;
+ uint8_t i;
+
+ dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED) {
+
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+
+ display = &hdcp->connection.displays[i];
+
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
+ dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
+ dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+
+ display->state = MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
+ }
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_add_display_topology(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_dtm_shared_memory *dtm_cmd;
+ struct mod_hdcp_display *display = NULL;
+ struct mod_hdcp_link *link = &hdcp->connection.link;
+ uint8_t i;
+
+ if (!psp->dtm_context.dtm_initialized) {
+ DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
+ return MOD_HDCP_STATUS_FAILURE;
+ }
+
+ dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE) {
+ display = &hdcp->connection.displays[i];
+
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
+ dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
+ dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1;
+ dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller;
+ dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line;
+ dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be;
+ dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
+ dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
+ dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__1_x;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+
+ display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
+ HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
+ }
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ if (!psp->hdcp_context.hdcp_initialized) {
+ DRM_ERROR("Failed to create hdcp session. HDCP TA is not initialized.");
+ return MOD_HDCP_STATUS_FAILURE;
+ }
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_create_session.display_handle = display->index;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_CREATE_SESSION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE;
+
+ hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle;
+ hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary;
+ memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary,
+ sizeof(hdcp->auth.msg.hdcp1.aksv));
+ memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary,
+ sizeof(hdcp->auth.msg.hdcp1.an));
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_destroy_session.session_handle = hdcp->auth.id;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_DESTROY_SESSION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
+
+ HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_first_part_authentication.session_handle = hdcp->auth.id;
+
+ memcpy(hdcp_cmd->in_msg.hdcp1_first_part_authentication.bksv_primary, hdcp->auth.msg.hdcp1.bksv,
+ TA_HDCP__HDCP1_KSV_SIZE);
+
+ hdcp_cmd->in_msg.hdcp1_first_part_authentication.r0_prime_primary = hdcp->auth.msg.hdcp1.r0p;
+ hdcp_cmd->in_msg.hdcp1_first_part_authentication.bcaps = hdcp->auth.msg.hdcp1.bcaps;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_FIRST_PART_AUTHENTICATION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
+
+ if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE) {
+ /* needs second part of authentication */
+ hdcp->connection.is_repeater = 1;
+ } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) {
+ hdcp->connection.is_repeater = 0;
+ } else
+ return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION;
+
+ if (!is_dp_mst_hdcp(hdcp)) {
+ display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP1_ENABLED_TRACE(hdcp, display->index);
+ }
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_second_part_authentication.session_handle = hdcp->auth.id;
+
+ hdcp_cmd->in_msg.hdcp1_second_part_authentication.ksv_list_size = hdcp->auth.msg.hdcp1.ksvlist_size;
+ memcpy(hdcp_cmd->in_msg.hdcp1_second_part_authentication.ksv_list, hdcp->auth.msg.hdcp1.ksvlist,
+ hdcp->auth.msg.hdcp1.ksvlist_size);
+
+ memcpy(hdcp_cmd->in_msg.hdcp1_second_part_authentication.v_prime, hdcp->auth.msg.hdcp1.vp,
+ sizeof(hdcp->auth.msg.hdcp1.vp));
+
+ hdcp_cmd->in_msg.hdcp1_second_part_authentication.bstatus_binfo =
+ is_dp_hdcp(hdcp) ? hdcp->auth.msg.hdcp1.binfo_dp : hdcp->auth.msg.hdcp1.bstatus;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_SECOND_PART_AUTHENTICATION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ int i = 0;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
+ hdcp->connection.displays[i].adjust.disable)
+ continue;
+
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
+ hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
+
+ hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index);
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_get_encryption_status.session_handle = hdcp->auth.id;
+
+ hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level = 0;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
+
+ return (hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level == 1)
+ ? MOD_HDCP_STATUS_SUCCESS
+ : MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
+ enum mod_hdcp_encryption_status *encryption_status)
+{
+ *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ if (mod_hdcp_hdcp1_link_maintenance(hdcp) != MOD_HDCP_STATUS_SUCCESS)
+ return MOD_HDCP_STATUS_FAILURE;
+
+ *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON;
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
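
Every TA call in this file follows the same shared-buffer protocol:
zero the buffer, fill the input message, set cmd_id, invoke the TA,
then read back the status the TA wrote. A condensed, hedged sketch
(submit_hdcp1_destroy() is a hypothetical name; the pattern mirrors
mod_hdcp_hdcp1_destroy_session() above and assumes this file's
includes):

static enum mod_hdcp_status submit_hdcp1_destroy(struct mod_hdcp *hdcp)
{
	struct psp_context *psp = hdcp->config.psp.handle;
	struct ta_hdcp_shared_memory *hdcp_cmd =
		(struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;

	memset(hdcp_cmd, 0, sizeof(*hdcp_cmd));	/* 1. zero the shared buffer */
	hdcp_cmd->in_msg.hdcp1_destroy_session.session_handle = hdcp->auth.id;
	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_DESTROY_SESSION;

	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);		/* 2. run the TA */

	return hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS ?	/* 3. check */
		MOD_HDCP_STATUS_SUCCESS :
		MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
}
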
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
new file mode 100644
index 000000000000..986fc07ea9ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MODULES_HDCP_HDCP_PSP_H_
+#define MODULES_HDCP_HDCP_PSP_H_
+
+/*
+ * NOTE: These parameters are a one-to-one copy of the
+ * parameters required by PSP
+ */
+enum bgd_security_hdcp_encryption_level {
+ HDCP_ENCRYPTION_LEVEL__INVALID = 0,
+ HDCP_ENCRYPTION_LEVEL__OFF,
+ HDCP_ENCRYPTION_LEVEL__ON
+};
+
+enum ta_dtm_command {
+ TA_DTM_COMMAND__UNUSED_1 = 1,
+ TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2,
+ TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE
+};
+
+/* DTM related enumerations */
+/**********************************************************/
+
+enum ta_dtm_status {
+ TA_DTM_STATUS__SUCCESS = 0x00,
+ TA_DTM_STATUS__GENERIC_FAILURE = 0x01,
+ TA_DTM_STATUS__INVALID_PARAMETER = 0x02,
+ TA_DTM_STATUS__NULL_POINTER = 0x3
+};
+
+/* input/output structures for DTM commands */
+/**********************************************************/
+/*
+ * Input structures
+ */
+enum ta_dtm_hdcp_version_max_supported {
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__NONE = 0,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__1_x = 10,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_0 = 20,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_1 = 21,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2 = 22,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_3 = 23
+};
+
+struct ta_dtm_topology_update_input_v2 {
+	/* display handle is unique across the driver; it identifies a display
+	 * for all security interfaces which reference displays, e.g. HDCP */
+ uint32_t display_handle;
+ uint32_t is_active;
+ uint32_t is_miracast;
+ uint32_t controller;
+ uint32_t ddc_line;
+ uint32_t dig_be;
+ uint32_t dig_fe;
+ uint32_t dp_mst_vcid;
+ uint32_t is_assr;
+ uint32_t max_hdcp_supported_version;
+};
+
+struct ta_dtm_topology_assr_enable {
+ uint32_t display_topology_dig_be_index;
+};
+
+/*
+ * Output structures
+ */
+
+/* No output structures yet */
+
+union ta_dtm_cmd_input {
+ struct ta_dtm_topology_update_input_v2 topology_update_v2;
+ struct ta_dtm_topology_assr_enable topology_assr_enable;
+};
+
+union ta_dtm_cmd_output {
+ uint32_t reserved;
+};
+
+struct ta_dtm_shared_memory {
+ uint32_t cmd_id;
+ uint32_t resp_id;
+ enum ta_dtm_status dtm_status;
+ uint32_t reserved;
+ union ta_dtm_cmd_input dtm_in_message;
+ union ta_dtm_cmd_output dtm_out_message;
+};
+
+int psp_cmd_submit_buf(struct psp_context *psp, struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd,
+ uint64_t fence_mc_addr);
+
+enum ta_hdcp_command {
+ TA_HDCP_COMMAND__INITIALIZE,
+ TA_HDCP_COMMAND__HDCP1_CREATE_SESSION,
+ TA_HDCP_COMMAND__HDCP1_DESTROY_SESSION,
+ TA_HDCP_COMMAND__HDCP1_FIRST_PART_AUTHENTICATION,
+ TA_HDCP_COMMAND__HDCP1_SECOND_PART_AUTHENTICATION,
+ TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION,
+ TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION,
+ TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS,
+};
+
+/* HDCP related enumerations */
+/**********************************************************/
+#define TA_HDCP__INVALID_SESSION 0xFFFF
+#define TA_HDCP__HDCP1_AN_SIZE 8
+#define TA_HDCP__HDCP1_KSV_SIZE 5
+#define TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES 127
+#define TA_HDCP__HDCP1_V_PRIME_SIZE 20
+
+enum ta_hdcp_status {
+ TA_HDCP_STATUS__SUCCESS = 0x00,
+ TA_HDCP_STATUS__GENERIC_FAILURE = 0x01,
+ TA_HDCP_STATUS__NULL_POINTER = 0x02,
+ TA_HDCP_STATUS__FAILED_ALLOCATING_SESSION = 0x03,
+ TA_HDCP_STATUS__FAILED_SETUP_TX = 0x04,
+ TA_HDCP_STATUS__INVALID_PARAMETER = 0x05,
+ TA_HDCP_STATUS__VHX_ERROR = 0x06,
+ TA_HDCP_STATUS__SESSION_NOT_CLOSED_PROPERLY = 0x07,
+ TA_HDCP_STATUS__SRM_FAILURE = 0x08,
+ TA_HDCP_STATUS__MST_AUTHENTICATED_ALREADY_STARTED = 0x09,
+ TA_HDCP_STATUS__AKE_SEND_CERT_FAILURE = 0x0A,
+ TA_HDCP_STATUS__AKE_NO_STORED_KM_FAILURE = 0x0B,
+ TA_HDCP_STATUS__AKE_SEND_HPRIME_FAILURE = 0x0C,
+ TA_HDCP_STATUS__LC_SEND_LPRIME_FAILURE = 0x0D,
+ TA_HDCP_STATUS__SKE_SEND_EKS_FAILURE = 0x0E,
+ TA_HDCP_STATUS__REPAUTH_SEND_RXIDLIST_FAILURE = 0x0F,
+ TA_HDCP_STATUS__REPAUTH_STREAM_READY_FAILURE = 0x10,
+ TA_HDCP_STATUS__ASD_GENERIC_FAILURE = 0x11,
+ TA_HDCP_STATUS__UNWRAP_SECRET_FAILURE = 0x12,
+ TA_HDCP_STATUS__ENABLE_ENCR_FAILURE = 0x13,
+ TA_HDCP_STATUS__DISABLE_ENCR_FAILURE = 0x14,
+ TA_HDCP_STATUS__NOT_ENOUGH_MEMORY_FAILURE = 0x15,
+ TA_HDCP_STATUS__UNKNOWN_MESSAGE = 0x16,
+ TA_HDCP_STATUS__TOO_MANY_STREAM = 0x17
+};
+
+enum ta_hdcp_authentication_status {
+ TA_HDCP_AUTHENTICATION_STATUS__NOT_STARTED = 0x00,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_FAILED = 0x01,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE = 0x02,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_SECOND_PART_FAILED = 0x03,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED = 0x04,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09
+};
+
+/* input/output structures for HDCP commands */
+/**********************************************************/
+struct ta_hdcp_cmd_hdcp1_create_session_input {
+ uint8_t display_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_create_session_output {
+ uint32_t session_handle;
+ uint8_t an_primary[TA_HDCP__HDCP1_AN_SIZE];
+ uint8_t aksv_primary[TA_HDCP__HDCP1_KSV_SIZE];
+ uint8_t ainfo_primary;
+ uint8_t an_secondary[TA_HDCP__HDCP1_AN_SIZE];
+ uint8_t aksv_secondary[TA_HDCP__HDCP1_KSV_SIZE];
+ uint8_t ainfo_secondary;
+};
+
+struct ta_hdcp_cmd_hdcp1_destroy_session_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_first_part_authentication_input {
+ uint32_t session_handle;
+ uint8_t bksv_primary[TA_HDCP__HDCP1_KSV_SIZE];
+ uint8_t bksv_secondary[TA_HDCP__HDCP1_KSV_SIZE];
+ uint8_t bcaps;
+ uint16_t r0_prime_primary;
+ uint16_t r0_prime_secondary;
+};
+
+struct ta_hdcp_cmd_hdcp1_first_part_authentication_output {
+ enum ta_hdcp_authentication_status authentication_status;
+};
+
+struct ta_hdcp_cmd_hdcp1_second_part_authentication_input {
+ uint32_t session_handle;
+ uint16_t bstatus_binfo;
+ uint8_t ksv_list[TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES][TA_HDCP__HDCP1_KSV_SIZE];
+ uint32_t ksv_list_size;
+ uint8_t pj_prime;
+ uint8_t v_prime[TA_HDCP__HDCP1_V_PRIME_SIZE];
+};
+
+struct ta_hdcp_cmd_hdcp1_second_part_authentication_output {
+ enum ta_hdcp_authentication_status authentication_status;
+};
+
+struct ta_hdcp_cmd_hdcp1_enable_encryption_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input {
+ uint32_t session_handle;
+ uint32_t display_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_get_encryption_status_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_get_encryption_status_output {
+ uint32_t protection_level;
+};
+
+/**********************************************************/
+/* Common input structure for HDCP callbacks */
+union ta_hdcp_cmd_input {
+ struct ta_hdcp_cmd_hdcp1_create_session_input hdcp1_create_session;
+ struct ta_hdcp_cmd_hdcp1_destroy_session_input hdcp1_destroy_session;
+ struct ta_hdcp_cmd_hdcp1_first_part_authentication_input hdcp1_first_part_authentication;
+ struct ta_hdcp_cmd_hdcp1_second_part_authentication_input hdcp1_second_part_authentication;
+ struct ta_hdcp_cmd_hdcp1_enable_encryption_input hdcp1_enable_encryption;
+ struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input hdcp1_enable_dp_stream_encryption;
+ struct ta_hdcp_cmd_hdcp1_get_encryption_status_input hdcp1_get_encryption_status;
+};
+
+/* Common output structure for HDCP callbacks */
+union ta_hdcp_cmd_output {
+ struct ta_hdcp_cmd_hdcp1_create_session_output hdcp1_create_session;
+ struct ta_hdcp_cmd_hdcp1_first_part_authentication_output hdcp1_first_part_authentication;
+ struct ta_hdcp_cmd_hdcp1_second_part_authentication_output hdcp1_second_part_authentication;
+ struct ta_hdcp_cmd_hdcp1_get_encryption_status_output hdcp1_get_encryption_status;
+};
+/**********************************************************/
+
+struct ta_hdcp_shared_memory {
+ uint32_t cmd_id;
+ enum ta_hdcp_status hdcp_status;
+ uint32_t reserved;
+ union ta_hdcp_cmd_input in_msg;
+ union ta_hdcp_cmd_output out_msg;
+};
+
+enum psp_status {
+ PSP_STATUS__SUCCESS = 0,
+ PSP_STATUS__ERROR_INVALID_PARAMS,
+ PSP_STATUS__ERROR_GENERIC,
+ PSP_STATUS__ERROR_OUT_OF_MEMORY,
+ PSP_STATUS__ERROR_UNSUPPORTED_FEATURE
+};
+
+#endif /* MODULES_HDCP_HDCP_PSP_H_ */
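
A hedged sketch of filling the DTM shared memory declared above for a
TOPOLOGY_UPDATE_V2 command, mirroring mod_hdcp_add_display_topology()
earlier in this patch (dtm_fill_topology_update() is a hypothetical
helper; the caller still has to invoke the TA and check dtm_status):

static void dtm_fill_topology_update(struct ta_dtm_shared_memory *dtm_cmd,
		uint32_t display_handle, uint32_t is_active)
{
	memset(dtm_cmd, 0, sizeof(*dtm_cmd));
	dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
	dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display_handle;
	dtm_cmd->dtm_in_message.topology_update_v2.is_active = is_active;
	/* preset to failure so a TA that never ran reads back as an error */
	dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
}
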
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index dc187844d10b..dbe7835aabcf 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -92,6 +92,7 @@ struct mod_vrr_params_btr {
uint32_t inserted_duration_in_us;
uint32_t frames_to_insert;
uint32_t frame_counter;
+ uint32_t margin_in_us;
};
struct mod_vrr_params_fixed_refresh {
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
new file mode 100644
index 000000000000..dea21702edff
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MOD_HDCP_H_
+#define MOD_HDCP_H_
+
+#include "os_types.h"
+#include "signal_types.h"
+
+/* Forward Declarations */
+struct mod_hdcp;
+
+#define MAX_NUM_OF_DISPLAYS 6
+#define MAX_NUM_OF_ATTEMPTS 4
+#define MAX_NUM_OF_ERROR_TRACE 10
+
+/* detailed return status */
+enum mod_hdcp_status {
+ MOD_HDCP_STATUS_SUCCESS = 0,
+ MOD_HDCP_STATUS_FAILURE,
+ MOD_HDCP_STATUS_RESET_NEEDED,
+ MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND,
+ MOD_HDCP_STATUS_DISPLAY_NOT_FOUND,
+ MOD_HDCP_STATUS_INVALID_STATE,
+ MOD_HDCP_STATUS_NOT_IMPLEMENTED,
+ MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE,
+ MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE,
+ MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE,
+ MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER,
+ MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE,
+ MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING,
+ MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY,
+ MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION,
+ MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED,
+ MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_INVALID_BKSV,
+ MOD_HDCP_STATUS_DDC_FAILURE, /* TODO: specific errors */
+ MOD_HDCP_STATUS_INVALID_OPERATION,
+ MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE,
+ MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING,
+ MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING,
+ MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION,
+ MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST,
+ MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE,
+};
+
+struct mod_hdcp_displayport {
+ uint8_t rev;
+ uint8_t assr_supported;
+};
+
+struct mod_hdcp_hdmi {
+ uint8_t reserved;
+};
+
+enum mod_hdcp_operation_mode {
+ MOD_HDCP_MODE_OFF,
+ MOD_HDCP_MODE_DEFAULT,
+ MOD_HDCP_MODE_DP,
+ MOD_HDCP_MODE_DP_MST
+};
+
+enum mod_hdcp_display_state {
+ MOD_HDCP_DISPLAY_INACTIVE = 0,
+ MOD_HDCP_DISPLAY_ACTIVE,
+ MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED,
+ MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
+};
+
+struct mod_hdcp_ddc {
+ void *handle;
+ struct {
+ bool (*read_i2c)(void *handle,
+ uint32_t address,
+ uint8_t offset,
+ uint8_t *data,
+ uint32_t size);
+ bool (*write_i2c)(void *handle,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size);
+ bool (*read_dpcd)(void *handle,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size);
+ bool (*write_dpcd)(void *handle,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size);
+ } funcs;
+};
+
+struct mod_hdcp_psp {
+ void *handle;
+ void *funcs;
+};
+
+struct mod_hdcp_display_adjustment {
+ uint8_t disable : 1;
+ uint8_t reserved : 7;
+};
+
+struct mod_hdcp_link_adjustment_hdcp1 {
+ uint8_t disable : 1;
+ uint8_t postpone_encryption : 1;
+ uint8_t reserved : 6;
+};
+
+struct mod_hdcp_link_adjustment_hdcp2 {
+ uint8_t disable : 1;
+ uint8_t disable_type1 : 1;
+ uint8_t force_no_stored_km : 1;
+ uint8_t increase_h_prime_timeout: 1;
+ uint8_t reserved : 4;
+};
+
+struct mod_hdcp_link_adjustment {
+ uint8_t auth_delay;
+ struct mod_hdcp_link_adjustment_hdcp1 hdcp1;
+ struct mod_hdcp_link_adjustment_hdcp2 hdcp2;
+};
+
+struct mod_hdcp_error {
+ enum mod_hdcp_status status;
+ uint8_t state_id;
+};
+
+struct mod_hdcp_trace {
+ struct mod_hdcp_error errors[MAX_NUM_OF_ERROR_TRACE];
+ uint8_t error_count;
+};
+
+enum mod_hdcp_encryption_status {
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF = 0,
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON,
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON,
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON
+};
+
+/* per-link events dm has to notify the hdcp module of */
+enum mod_hdcp_event {
+ MOD_HDCP_EVENT_CALLBACK = 0,
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT,
+ MOD_HDCP_EVENT_CPIRQ
+};
+
+/* output flags from module requesting timer operations */
+struct mod_hdcp_output {
+ uint8_t callback_needed;
+ uint8_t callback_stop;
+ uint8_t watchdog_timer_needed;
+ uint8_t watchdog_timer_stop;
+ uint16_t callback_delay;
+ uint16_t watchdog_timer_delay;
+};
+
+/* used to represent per-display info */
+struct mod_hdcp_display {
+ enum mod_hdcp_display_state state;
+ uint8_t index;
+ uint8_t controller;
+ uint8_t dig_fe;
+ union {
+ uint8_t vc_id;
+ };
+ struct mod_hdcp_display_adjustment adjust;
+};
+
+/* used to represent per-link info */
+/* in case a link has multiple displays, they share the same link info */
+struct mod_hdcp_link {
+ enum mod_hdcp_operation_mode mode;
+ uint8_t dig_be;
+ uint8_t ddc_line;
+ union {
+ struct mod_hdcp_displayport dp;
+ struct mod_hdcp_hdmi hdmi;
+ };
+ struct mod_hdcp_link_adjustment adjust;
+};
+
+/* a query structure for a display's hdcp information */
+struct mod_hdcp_display_query {
+ const struct mod_hdcp_display *display;
+ const struct mod_hdcp_link *link;
+ const struct mod_hdcp_trace *trace;
+ enum mod_hdcp_encryption_status encryption_status;
+};
+
+/* contains per-link values that are set on external display configuration change */
+struct mod_hdcp_config {
+ struct mod_hdcp_psp psp;
+ struct mod_hdcp_ddc ddc;
+ uint8_t index;
+};
+
+struct mod_hdcp;
+
+/* dm allocates one mod_hdcp per dc_link on dm init, sized via this query */
+size_t mod_hdcp_get_memory_size(void);
+
+/* called per link on link creation */
+enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp,
+ struct mod_hdcp_config *config);
+
+/* called per link on link destroy */
+enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp);
+
+/* called per display on cp_desired set to true */
+enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
+ struct mod_hdcp_link *link, struct mod_hdcp_display *display,
+ struct mod_hdcp_output *output);
+
+/* called per display on cp_desired set to false */
+enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
+ uint8_t index, struct mod_hdcp_output *output);
+
+/* called to query hdcp information on a specific index */
+enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp,
+ uint8_t index, struct mod_hdcp_display_query *query);
+
+/* called per link on connectivity change */
+enum mod_hdcp_status mod_hdcp_reset_connection(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output);
+
+/* called per link on events (i.e. callback, watchdog, CP_IRQ) */
+enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp,
+ enum mod_hdcp_event event, struct mod_hdcp_output *output);
+
+/* called to convert enum mod_hdcp_status to a C string */
+char *mod_hdcp_status_to_str(int32_t status);
+
+/* called to convert a state id to a C string */
+char *mod_hdcp_state_id_to_str(int32_t id);
+
+/* called to convert signal type to operation mode */
+enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
+ enum signal_type signal);
+#endif /* MOD_HDCP_H_ */
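
The mod_hdcp_output flags are the module's way of asking DM for timer
services. A hedged sketch of a consumer (the dm_* helpers are
hypothetical stand-ins for whatever timer facility the DM provides;
only the flag and millisecond-delay semantics come from this header):

/* hypothetical DM-side timer services, declared only for the sketch */
static void dm_cancel_callback(struct mod_hdcp *hdcp);
static void dm_cancel_watchdog(struct mod_hdcp *hdcp);
static void dm_schedule_callback(struct mod_hdcp *hdcp, uint16_t delay_ms);
static void dm_schedule_watchdog(struct mod_hdcp *hdcp, uint16_t delay_ms);

static void dm_handle_hdcp_output(struct mod_hdcp *hdcp,
		struct mod_hdcp_output *output)
{
	if (output->callback_stop)
		dm_cancel_callback(hdcp);
	if (output->watchdog_timer_stop)
		dm_cancel_watchdog(hdcp);
	if (output->callback_needed)
		dm_schedule_callback(hdcp, output->callback_delay);
	if (output->watchdog_timer_needed)
		dm_schedule_watchdog(hdcp, output->watchdog_timer_delay);
}
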
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index d930bdecb117..ca8ce3c55337 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -35,4 +35,7 @@ struct mod_vrr_params;
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet);
+void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
+ struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index d885d642ed7f..db6b08f6d093 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -31,6 +31,7 @@
#include "dc.h"
#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
+#define HF_VSIF_VERSION 1
// VTEM Byte Offset
#define VTEM_PB0 0
@@ -395,3 +396,99 @@
}
+/**
+ * mod_build_hf_vsif_infopacket() - prepare the HDMI Vendor Specific InfoFrame
+ *
+ * Builds the Vendor Specific InfoFrame (VSIF) per the HDMI specification,
+ * switching to the HDMI Forum VSIF variant when ALLM signalling is requested.
+ *
+ * @stream: stream data used to construct the VSIF (i.e. timing_3d_format,
+ *          hdmi_vic, etc.)
+ * @info_packet: output structure the constructed VSIF is stored in
+ * @ALLMEnabled: nonzero to emit the HDMI Forum VSIF carrying the ALLM field
+ * @ALLMValue: ALLM bit value to signal when ALLMEnabled is set
+ */
+void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
+ struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue)
+{
+ unsigned int length = 5;
+ bool hdmi_vic_mode = false;
+ uint8_t checksum = 0;
+ uint32_t i = 0;
+ enum dc_timing_3d_format format;
+ bool bALLM = (bool)ALLMEnabled;
+ bool bALLMVal = (bool)ALLMValue;
+
+ info_packet->valid = false;
+ format = stream->timing.timing_3d_format;
+ if (stream->view_format == VIEW_3D_FORMAT_NONE)
+ format = TIMING_3D_FORMAT_NONE;
+
+ if (stream->timing.hdmi_vic != 0
+ && stream->timing.h_total >= 3840
+ && stream->timing.v_total >= 2160
+ && format == TIMING_3D_FORMAT_NONE)
+ hdmi_vic_mode = true;
+
+ if ((format == TIMING_3D_FORMAT_NONE) && !hdmi_vic_mode && !bALLM)
+ return;
+
+ info_packet->sb[1] = 0x03;
+ info_packet->sb[2] = 0x0C;
+ info_packet->sb[3] = 0x00;
+
+ if (bALLM) {
+ info_packet->sb[1] = 0xD8;
+ info_packet->sb[2] = 0x5D;
+ info_packet->sb[3] = 0xC4;
+ info_packet->sb[4] = HF_VSIF_VERSION;
+ }
+
+ if (format != TIMING_3D_FORMAT_NONE)
+ info_packet->sb[4] = (2 << 5);
+
+ else if (hdmi_vic_mode)
+ info_packet->sb[4] = (1 << 5);
+
+ switch (format) {
+ case TIMING_3D_FORMAT_HW_FRAME_PACKING:
+ case TIMING_3D_FORMAT_SW_FRAME_PACKING:
+ info_packet->sb[5] = (0x0 << 4);
+ break;
+
+ case TIMING_3D_FORMAT_SIDE_BY_SIDE:
+ case TIMING_3D_FORMAT_SBS_SW_PACKED:
+ info_packet->sb[5] = (0x8 << 4);
+ length = 6;
+ break;
+
+ case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
+ case TIMING_3D_FORMAT_TB_SW_PACKED:
+ info_packet->sb[5] = (0x6 << 4);
+ break;
+
+ default:
+ break;
+ }
+
+ if (hdmi_vic_mode)
+ info_packet->sb[5] = stream->timing.hdmi_vic;
+
+ info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR;
+ info_packet->hb1 = 0x01;
+ info_packet->hb2 = (uint8_t) (length);
+
+ if (bALLM)
+ info_packet->sb[5] = (info_packet->sb[5] & ~0x02) | (bALLMVal << 1);
+
+ checksum += info_packet->hb0;
+ checksum += info_packet->hb1;
+ checksum += info_packet->hb2;
+
+ for (i = 1; i <= length; i++)
+ checksum += info_packet->sb[i];
+
+ info_packet->sb[0] = (uint8_t) (0x100 - checksum);
+
+ info_packet->valid = true;
+}
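
The sb[0] assignment above makes every InfoFrame byte sum to zero
modulo 256, the standard HDMI InfoFrame checksum rule. A hedged
self-check sketch (verify_vsif_checksum() is hypothetical and not part
of this patch; it assumes this file's includes):

static bool verify_vsif_checksum(const struct dc_info_packet *p,
		unsigned int length)
{
	uint8_t sum = p->hb0 + p->hb1 + p->hb2;
	unsigned int i;

	for (i = 0; i <= length; i++)	/* i == 0 is the checksum byte itself */
		sum += p->sb[i];

	return sum == 0;	/* holds by construction of sb[0] */
}
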
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 05e2be856037..4e2f615c3566 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -80,18 +80,18 @@ struct abm_parameters {
static const struct abm_parameters abm_settings_config0[abm_defines_max_level] = {
// min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee
- {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xE0},
- {0xff, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xE0},
- {0xff, 0x40, 0x20, 0x00, 0xff, 0x90, 0x68, 0x40, 0xE0},
- {0x82, 0x4d, 0x20, 0x00, 0x00, 0x90, 0xb3, 0x70, 0x70},
+ {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0},
+ {0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf},
+ {0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0},
+ {0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
};
static const struct abm_parameters abm_settings_config1[abm_defines_max_level] = {
// min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee
- {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70},
- {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70},
- {0x99, 0x65, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70},
- {0x82, 0x4d, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70},
+ {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+ {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+ {0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+ {0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
};
static const struct abm_parameters * const abm_settings[] = {
@@ -115,7 +115,7 @@ static const struct abm_parameters * const abm_settings[] = {
/* NOTE: iRAM is 256B in size */
struct iram_table_v_2 {
/* flags */
- uint16_t flags; /* 0x00 U16 */
+ uint16_t min_abm_backlight; /* 0x00 U16 */
/* parameters for ABM2.0 algorithm */
uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x02 U0.8 */
@@ -140,10 +140,10 @@ struct iram_table_v_2 {
/* For reading PSR State directly from IRAM */
uint8_t psr_state; /* 0xf0 */
- uint8_t dmcu_mcp_interface_version; /* 0xf1 */
- uint8_t dmcu_abm_feature_version; /* 0xf2 */
- uint8_t dmcu_psr_feature_version; /* 0xf3 */
- uint16_t dmcu_version; /* 0xf4 */
+ uint8_t dmcu_mcp_interface_version; /* 0xf1 */
+ uint8_t dmcu_abm_feature_version; /* 0xf2 */
+ uint8_t dmcu_psr_feature_version; /* 0xf3 */
+ uint16_t dmcu_version; /* 0xf4 */
uint8_t dmcu_state; /* 0xf6 */
uint16_t blRampReduction; /* 0xf7 */
@@ -164,42 +164,43 @@ struct iram_table_v_2_2 {
uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x16 U0.8 */
uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x2a U2.6 */
uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x3e U2.6 */
- uint8_t hybrid_factor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */
- uint8_t contrast_factor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */
- uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */
- uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */
- uint8_t min_knee[NUM_AGGR_LEVEL]; /* 0x63 U0.8 */
- uint8_t max_knee[NUM_AGGR_LEVEL]; /* 0x67 U0.8 */
- uint8_t pad[21]; /* 0x6b U0.8 */
+ uint8_t hybrid_factor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */
+ uint8_t contrast_factor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */
+ uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */
+ uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */
+ uint8_t min_knee[NUM_AGGR_LEVEL]; /* 0x63 U0.8 */
+ uint8_t max_knee[NUM_AGGR_LEVEL]; /* 0x67 U0.8 */
+ uint16_t min_abm_backlight; /* 0x6b U16 */
+ uint8_t pad[19]; /* 0x6d U0.8 */
/* parameters for crgb conversion */
- uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */
- uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */
- uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */
+ uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */
+ uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */
+ uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */
/* parameters for custom curve */
/* thresholds for brightness --> backlight */
- uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */
+ uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */
/* offsets for brightness --> backlight */
- uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */
+ uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */
/* For reading PSR State directly from IRAM */
- uint8_t psr_state; /* 0xf0 */
- uint8_t dmcu_mcp_interface_version; /* 0xf1 */
- uint8_t dmcu_abm_feature_version; /* 0xf2 */
- uint8_t dmcu_psr_feature_version; /* 0xf3 */
- uint16_t dmcu_version; /* 0xf4 */
- uint8_t dmcu_state; /* 0xf6 */
-
- uint8_t dummy1; /* 0xf7 */
- uint8_t dummy2; /* 0xf8 */
- uint8_t dummy3; /* 0xf9 */
- uint8_t dummy4; /* 0xfa */
- uint8_t dummy5; /* 0xfb */
- uint8_t dummy6; /* 0xfc */
- uint8_t dummy7; /* 0xfd */
- uint8_t dummy8; /* 0xfe */
- uint8_t dummy9; /* 0xff */
+ uint8_t psr_state; /* 0xf0 */
+ uint8_t dmcu_mcp_interface_version; /* 0xf1 */
+ uint8_t dmcu_abm_feature_version; /* 0xf2 */
+ uint8_t dmcu_psr_feature_version; /* 0xf3 */
+ uint16_t dmcu_version; /* 0xf4 */
+ uint8_t dmcu_state; /* 0xf6 */
+
+ uint8_t dummy1; /* 0xf7 */
+ uint8_t dummy2; /* 0xf8 */
+ uint8_t dummy3; /* 0xf9 */
+ uint8_t dummy4; /* 0xfa */
+ uint8_t dummy5; /* 0xfb */
+ uint8_t dummy6; /* 0xfc */
+ uint8_t dummy7; /* 0xfd */
+ uint8_t dummy8; /* 0xfe */
+ uint8_t dummy9; /* 0xff */
};
#pragma pack(pop)
@@ -271,7 +272,8 @@ void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters
{
unsigned int set = params.set;
- ram_table->flags = 0x0;
+ ram_table->min_abm_backlight =
+ cpu_to_be16(params.min_abm_backlight);
ram_table->deviation_gain = 0xb3;
ram_table->blRampReduction =
@@ -445,6 +447,9 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
ram_table->flags = 0x0;
+ ram_table->min_abm_backlight =
+ cpu_to_be16(params.min_abm_backlight);
+
ram_table->deviation_gain[0] = 0xb3;
ram_table->deviation_gain[1] = 0xa8;
ram_table->deviation_gain[2] = 0x98;
@@ -588,6 +593,10 @@ void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
unsigned int set = params.set;
ram_table->flags = 0x0;
+
+ ram_table->min_abm_backlight =
+ cpu_to_be16(params.min_abm_backlight);
+
for (i = 0; i < NUM_AGGR_LEVEL; i++) {
ram_table->hybrid_factor[i] = abm_settings[set][i].brightness_gain;
ram_table->contrast_factor[i] = abm_settings[set][i].contrast_factor;
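
Because the iRAM tables are built under #pragma pack(1) and consumed by
the DMCU as a fixed big-endian byte layout (hence the cpu_to_be16()
calls above), the new min_abm_backlight field has to land exactly at
the offset the struct comments document: 0x6b in the v2.2 table. A
hedged compile-time check sketch (plain C11 _Static_assert with
offsetof from <stddef.h>; the kernel itself would typically use
BUILD_BUG_ON):

_Static_assert(offsetof(struct iram_table_v_2_2, min_abm_backlight) == 0x6b,
	       "min_abm_backlight must match the DMCU iRAM layout");
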
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index da5df00fedce..e54157026330 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -38,6 +38,7 @@ struct dmcu_iram_parameters {
unsigned int backlight_lut_array_size;
unsigned int backlight_ramping_reduction;
unsigned int backlight_ramping_start;
+ unsigned int min_abm_backlight;
unsigned int set;
};
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h
index a761ba07f937..fce965984e76 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h
@@ -27,6 +27,7 @@
#define mmMM_INDEX 0x0
#define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1
+#define mmCC_BIF_BX_FUSESTRAP0 0x14D7
#define mmBUS_CNTL 0x1508
#define mmCONFIG_CNTL 0x1509
#define mmCONFIG_MEMSIZE 0x150a
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h
index 8fbfd0261d27..39cc4880beb4 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h
@@ -32,6 +32,8 @@
#define MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
#define MM_DATA__MM_DATA_MASK 0xffffffff
#define MM_DATA__MM_DATA__SHIFT 0x0
+#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK 0x2
+#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE__SHIFT 0x1
#define BUS_CNTL__BIOS_ROM_WRT_EN_MASK 0x1
#define BUS_CNTL__BIOS_ROM_WRT_EN__SHIFT 0x0
#define BUS_CNTL__BIOS_ROM_DIS_MASK 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
index 809759f7bb81..8d05d6ca1c8d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
@@ -27,6 +27,7 @@
#define mmMM_INDEX 0x0
#define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1
+#define mmCC_BIF_BX_FUSESTRAP0 0x14D7
#define mmCC_BIF_BX_STRAP2 0x152A
#define mmBIF_MM_INDACCESS_CNTL 0x1500
#define mmBIF_DOORBELL_APER_EN 0x1501
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h
index adc71b01f793..73435687d049 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h
@@ -32,6 +32,8 @@
#define MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
#define MM_DATA__MM_DATA_MASK 0xffffffff
#define MM_DATA__MM_DATA__SHIFT 0x0
+#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK 0x2
+#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE__SHIFT 0x1
#define BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS_MASK 0x2
#define BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS__SHIFT 0x1
#define BIF_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
index be4249adb356..eddf83ec1c39 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
@@ -9859,6 +9859,8 @@
#define mmDP0_DP_STEER_FIFO_BASE_IDX 2
#define mmDP0_DP_MSA_MISC 0x210e
#define mmDP0_DP_MSA_MISC_BASE_IDX 2
+#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
+#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP0_DP_VID_TIMING 0x2110
#define mmDP0_DP_VID_TIMING_BASE_IDX 2
#define mmDP0_DP_VID_N 0x2111
@@ -10187,6 +10189,8 @@
#define mmDP1_DP_STEER_FIFO_BASE_IDX 2
#define mmDP1_DP_MSA_MISC 0x220e
#define mmDP1_DP_MSA_MISC_BASE_IDX 2
+#define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f
+#define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP1_DP_VID_TIMING 0x2210
#define mmDP1_DP_VID_TIMING_BASE_IDX 2
#define mmDP1_DP_VID_N 0x2211
@@ -10515,6 +10519,8 @@
#define mmDP2_DP_STEER_FIFO_BASE_IDX 2
#define mmDP2_DP_MSA_MISC 0x230e
#define mmDP2_DP_MSA_MISC_BASE_IDX 2
+#define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f
+#define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP2_DP_VID_TIMING 0x2310
#define mmDP2_DP_VID_TIMING_BASE_IDX 2
#define mmDP2_DP_VID_N 0x2311
@@ -10843,6 +10849,8 @@
#define mmDP3_DP_STEER_FIFO_BASE_IDX 2
#define mmDP3_DP_MSA_MISC 0x240e
#define mmDP3_DP_MSA_MISC_BASE_IDX 2
+#define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f
+#define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP3_DP_VID_TIMING 0x2410
#define mmDP3_DP_VID_TIMING_BASE_IDX 2
#define mmDP3_DP_VID_N 0x2411
@@ -11171,6 +11179,8 @@
#define mmDP4_DP_STEER_FIFO_BASE_IDX 2
#define mmDP4_DP_MSA_MISC 0x250e
#define mmDP4_DP_MSA_MISC_BASE_IDX 2
+#define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f
+#define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP4_DP_VID_TIMING 0x2510
#define mmDP4_DP_VID_TIMING_BASE_IDX 2
#define mmDP4_DP_VID_N 0x2511
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
index ca16d9125fbc..2bfaaa8157d0 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
@@ -1146,7 +1146,14 @@
#define mmATC_L2_MEM_POWER_LS_BASE_IDX 0
#define mmATC_L2_CGTT_CLK_CTRL 0x080c
#define mmATC_L2_CGTT_CLK_CTRL_BASE_IDX 0
-
+#define mmATC_L2_CACHE_4K_EDC_INDEX 0x080e
+#define mmATC_L2_CACHE_4K_EDC_INDEX_BASE_IDX 0
+#define mmATC_L2_CACHE_2M_EDC_INDEX 0x080f
+#define mmATC_L2_CACHE_2M_EDC_INDEX_BASE_IDX 0
+#define mmATC_L2_CACHE_4K_EDC_CNT 0x0810
+#define mmATC_L2_CACHE_4K_EDC_CNT_BASE_IDX 0
+#define mmATC_L2_CACHE_2M_EDC_CNT 0x0811
+#define mmATC_L2_CACHE_2M_EDC_CNT_BASE_IDX 0
// addressBlock: gc_utcl2_vml2pfdec
// base address: 0xa100
@@ -1206,7 +1213,14 @@
#define mmVM_L2_CACHE_PARITY_CNTL_BASE_IDX 0
#define mmVM_L2_CGTT_CLK_CTRL 0x085e
#define mmVM_L2_CGTT_CLK_CTRL_BASE_IDX 0
-
+#define mmVM_L2_MEM_ECC_INDEX 0x0860
+#define mmVM_L2_MEM_ECC_INDEX_BASE_IDX 0
+#define mmVM_L2_WALKER_MEM_ECC_INDEX 0x0861
+#define mmVM_L2_WALKER_MEM_ECC_INDEX_BASE_IDX 0
+#define mmVM_L2_MEM_ECC_CNT 0x0862
+#define mmVM_L2_MEM_ECC_CNT_BASE_IDX 0
+#define mmVM_L2_WALKER_MEM_ECC_CNT 0x0863
+#define mmVM_L2_WALKER_MEM_ECC_CNT_BASE_IDX 0
// addressBlock: gc_utcl2_vml2vcdec
// base address: 0xa200
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
index 064c4bb1dc62..d4c613a85352 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
@@ -6661,7 +6661,6 @@
#define ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
#define ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
-
// addressBlock: gc_utcl2_vml2pfdec
//VM_L2_CNTL
#define VM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0
@@ -6991,7 +6990,22 @@
#define VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
#define VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
#define VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
-
+//VM_L2_MEM_ECC_INDEX
+#define VM_L2_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define VM_L2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//VM_L2_WALKER_MEM_ECC_INDEX
+#define VM_L2_WALKER_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define VM_L2_WALKER_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//VM_L2_MEM_ECC_CNT
+#define VM_L2_MEM_ECC_CNT__SEC_COUNT__SHIFT 0xc
+#define VM_L2_MEM_ECC_CNT__DED_COUNT__SHIFT 0xe
+#define VM_L2_MEM_ECC_CNT__SEC_COUNT_MASK 0x00003000L
+#define VM_L2_MEM_ECC_CNT__DED_COUNT_MASK 0x0000C000L
+//VM_L2_WALKER_MEM_ECC_CNT
+#define VM_L2_WALKER_MEM_ECC_CNT__SEC_COUNT__SHIFT 0xc
+#define VM_L2_WALKER_MEM_ECC_CNT__DED_COUNT__SHIFT 0xe
+#define VM_L2_WALKER_MEM_ECC_CNT__SEC_COUNT_MASK 0x00003000L
+#define VM_L2_WALKER_MEM_ECC_CNT__DED_COUNT_MASK 0x0000C000L
// addressBlock: gc_utcl2_vml2vcdec
//VM_CONTEXT0_CNTL
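
The new _MASK/__SHIFT pairs are consumed with the usual read-mask-shift
idiom. A hedged sketch for the ECC counters added above (the helper
names are hypothetical; drivers normally wrap this in their REG_GET
macros):

static inline uint32_t vm_l2_sec_count(uint32_t reg)
{
	return (reg & VM_L2_MEM_ECC_CNT__SEC_COUNT_MASK) >>
		VM_L2_MEM_ECC_CNT__SEC_COUNT__SHIFT;
}

static inline uint32_t vm_l2_ded_count(uint32_t reg)
{
	return (reg & VM_L2_MEM_ECC_CNT__DED_COUNT_MASK) >>
		VM_L2_MEM_ECC_CNT__DED_COUNT__SHIFT;
}
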
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h
index 4bcacf529852..991128bb9476 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h
@@ -22,6 +22,9 @@
#ifndef _nbio_7_4_0_SMN_HEADER
#define _nbio_7_4_0_SMN_HEADER
+// addressBlock: nbio_nbif0_bif_ras_bif_ras_regblk
+// base address: 0x10100000
+#define smnBIFL_RAS_CENTRAL_STATUS 0x10139040
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
#define smnCPM_CONTROL 0x11180460
@@ -53,4 +56,13 @@
#define smnPCIE_RX_NUM_NAK 0x11180038
#define smnPCIE_RX_NUM_NAK_GENERATED 0x1118003c
+// addressBlock: nbio_iohub_nb_misc_misc_cfgdec
+// base address: 0x13a10000
+#define smnIOHC_INTERRUPT_EOI 0x13a10120
+
+// addressBlock: nbio_iohub_nb_rascfg_ras_cfgdec
+// base address: 0x13a20000
+#define smnRAS_GLOBAL_STATUS_LO 0x13a20020
+#define smnRAS_GLOBAL_STATUS_HI 0x13a20024
+
#endif // _nbio_7_4_0_SMN_HEADER
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
index 994e796a28d7..ce5830ebe095 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
@@ -2793,8 +2793,8 @@
#define mmBIF_DOORBELL_INT_CNTL_BASE_IDX 2
#define mmBIF_FB_EN 0x00ff
#define mmBIF_FB_EN_BASE_IDX 2
-#define mmBIF_BUSY_DELAY_CNTR 0x0100
-#define mmBIF_BUSY_DELAY_CNTR_BASE_IDX 2
+#define mmBIF_INTR_CNTL 0x0100
+#define mmBIF_INTR_CNTL_BASE_IDX 2
#define mmBIF_MST_TRANS_PENDING_VF 0x0109
#define mmBIF_MST_TRANS_PENDING_VF_BASE_IDX 2
#define mmBIF_SLV_TRANS_PENDING_VF 0x010a
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
index d467b939c971..07f04b2b5bdd 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
@@ -20420,9 +20420,9 @@
#define BIF_FB_EN__FB_WRITE_EN__SHIFT 0x1
#define BIF_FB_EN__FB_READ_EN_MASK 0x00000001L
#define BIF_FB_EN__FB_WRITE_EN_MASK 0x00000002L
-//BIF_BUSY_DELAY_CNTR
-#define BIF_BUSY_DELAY_CNTR__DELAY_CNT__SHIFT 0x0
-#define BIF_BUSY_DELAY_CNTR__DELAY_CNT_MASK 0x0000003FL
+//BIF_INTR_CNTL
+#define BIF_INTR_CNTL__RAS_INTR_VEC_SEL__SHIFT 0x0
+#define BIF_INTR_CNTL__RAS_INTR_VEC_SEL_MASK 0x00000001L
//BIF_MST_TRANS_PENDING_VF
#define BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING__SHIFT 0x0
#define BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING_MASK 0x7FFFFFFFL
@@ -48436,4 +48436,47 @@
#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
+//IOHC_INTERRUPT_EOI
+#define IOHC_INTERRUPT_EOI__SMI_EOI__SHIFT 0x0
+#define IOHC_INTERRUPT_EOI__SCI_EOI__SHIFT 0x1
+#define IOHC_INTERRUPT_EOI__NMI_EOI__SHIFT 0x2
+#define IOHC_INTERRUPT_EOI__SMI_EOI_MASK 0x00000001L
+#define IOHC_INTERRUPT_EOI__SCI_EOI_MASK 0x00000002L
+#define IOHC_INTERRUPT_EOI__NMI_EOI_MASK 0x00000004L
+
+//RAS_GLOBAL_STATUS_LO
+#define RAS_GLOBAL_STATUS_LO__ParityErrCorr__SHIFT 0x0
+#define RAS_GLOBAL_STATUS_LO__ParityErrNonFatal__SHIFT 0x1
+#define RAS_GLOBAL_STATUS_LO__ParityErrFatal__SHIFT 0x2
+#define RAS_GLOBAL_STATUS_LO__ParityErrSerr__SHIFT 0x3
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_NMI__SHIFT 0x6
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SCI__SHIFT 0x7
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SMI__SHIFT 0x8
+#define RAS_GLOBAL_STATUS_LO__SW_SMI__SHIFT 0x9
+#define RAS_GLOBAL_STATUS_LO__SW_SCI__SHIFT 0xa
+#define RAS_GLOBAL_STATUS_LO__SW_NMI__SHIFT 0xb
+#define RAS_GLOBAL_STATUS_LO__APML_NMI__SHIFT 0xc
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld__SHIFT 0xd
+#define RAS_GLOBAL_STATUS_LO__PIN_SyncFld_NMI__SHIFT 0xe
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_Private__SHIFT 0xf
+#define RAS_GLOBAL_STATUS_LO__ParityErrCorr_MASK 0x00000001L
+#define RAS_GLOBAL_STATUS_LO__ParityErrNonFatal_MASK 0x00000002L
+#define RAS_GLOBAL_STATUS_LO__ParityErrFatal_MASK 0x00000004L
+#define RAS_GLOBAL_STATUS_LO__ParityErrSerr_MASK 0x00000008L
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_NMI_MASK 0x00000040L
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SCI_MASK 0x00000080L
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SMI_MASK 0x00000100L
+#define RAS_GLOBAL_STATUS_LO__SW_SMI_MASK 0x00000200L
+#define RAS_GLOBAL_STATUS_LO__SW_SCI_MASK 0x00000400L
+#define RAS_GLOBAL_STATUS_LO__SW_NMI_MASK 0x00000800L
+#define RAS_GLOBAL_STATUS_LO__APML_NMI_MASK 0x00001000L
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_MASK 0x00002000L
+#define RAS_GLOBAL_STATUS_LO__PIN_SyncFld_NMI_MASK 0x00004000L
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_Private_MASK 0x00008000L
+//RAS_GLOBAL_STATUS_HI
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortAErr__SHIFT 0x0
+#define RAS_GLOBAL_STATUS_HI__NBIF0PortAErr__SHIFT 0x1
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortAErr_MASK 0x00000001L
+#define RAS_GLOBAL_STATUS_HI__NBIF0PortAErr_MASK 0x00000002L
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h
index dc9895a684fe..096d878eb1de 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h
@@ -588,11 +588,15 @@
#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT_MASK 0x40000000L
#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT_MASK 0x80000000L
//IH_CLK_CTRL
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x19
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE__SHIFT 0x1b
#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE__SHIFT 0x1c
#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e
#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE_MASK 0x02000000L
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE_MASK 0x08000000L
#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE_MASK 0x10000000L
#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
index dbc2e723f659..71169daa701a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
@@ -49,6 +49,7 @@
#define ixCG_SPLL_FUNC_CNTL_5 0xc0500150
#define ixCG_SPLL_FUNC_CNTL_6 0xc0500154
#define ixCG_SPLL_FUNC_CNTL_7 0xc0500158
+#define ixCG_SPLL_STATUS 0xC050015C
#define ixSPLL_CNTL_MODE 0xc0500160
#define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164
#define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
index 6af9f0217b34..61a9a84e0c3a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
@@ -194,6 +194,8 @@
#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
index bd3685166779..351446754c72 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
@@ -49,6 +49,7 @@
#define ixCG_SPLL_FUNC_CNTL_5 0xc0500150
#define ixCG_SPLL_FUNC_CNTL_6 0xc0500154
#define ixCG_SPLL_FUNC_CNTL_7 0xc0500158
+#define ixCG_SPLL_STATUS 0xC050015C
#define ixSPLL_CNTL_MODE 0xc0500160
#define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164
#define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h
index 627906674fe8..4bfd5f8ba66c 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h
@@ -194,6 +194,8 @@
#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
index f35aba72e640..21da61c398f5 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
@@ -52,6 +52,7 @@
#define ixCG_SPLL_FUNC_CNTL_5 0xc0500150
#define ixCG_SPLL_FUNC_CNTL_6 0xc0500154
#define ixCG_SPLL_FUNC_CNTL_7 0xc0500158
+#define ixCG_SPLL_STATUS 0xC050015C
#define ixSPLL_CNTL_MODE 0xc0500160
#define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164
#define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
index 481ee6560aa9..f64fe0fbcb32 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
@@ -220,6 +220,8 @@
#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h
index d3876052562b..687d6843c258 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h
@@ -121,6 +121,98 @@
#define mmCKSVII2C_IC_COMP_VERSION_BASE_IDX 0
#define mmCKSVII2C_IC_COMP_TYPE 0x006d
#define mmCKSVII2C_IC_COMP_TYPE_BASE_IDX 0
+#define mmCKSVII2C1_IC_CON 0x0080
+#define mmCKSVII2C1_IC_CON_BASE_IDX 0
+#define mmCKSVII2C1_IC_TAR 0x0081
+#define mmCKSVII2C1_IC_TAR_BASE_IDX 0
+#define mmCKSVII2C1_IC_SAR 0x0082
+#define mmCKSVII2C1_IC_SAR_BASE_IDX 0
+#define mmCKSVII2C1_IC_HS_MADDR 0x0083
+#define mmCKSVII2C1_IC_HS_MADDR_BASE_IDX 0
+#define mmCKSVII2C1_IC_DATA_CMD 0x0084
+#define mmCKSVII2C1_IC_DATA_CMD_BASE_IDX 0
+#define mmCKSVII2C1_IC_SS_SCL_HCNT 0x0085
+#define mmCKSVII2C1_IC_SS_SCL_HCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_SS_SCL_LCNT 0x0086
+#define mmCKSVII2C1_IC_SS_SCL_LCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_FS_SCL_HCNT 0x0087
+#define mmCKSVII2C1_IC_FS_SCL_HCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_FS_SCL_LCNT 0x0088
+#define mmCKSVII2C1_IC_FS_SCL_LCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_HS_SCL_HCNT 0x0089
+#define mmCKSVII2C1_IC_HS_SCL_HCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_HS_SCL_LCNT 0x008a
+#define mmCKSVII2C1_IC_HS_SCL_LCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_INTR_STAT 0x008b
+#define mmCKSVII2C1_IC_INTR_STAT_BASE_IDX 0
+#define mmCKSVII2C1_IC_INTR_MASK 0x008c
+#define mmCKSVII2C1_IC_INTR_MASK_BASE_IDX 0
+#define mmCKSVII2C1_IC_RAW_INTR_STAT 0x008d
+#define mmCKSVII2C1_IC_RAW_INTR_STAT_BASE_IDX 0
+#define mmCKSVII2C1_IC_RX_TL 0x008e
+#define mmCKSVII2C1_IC_RX_TL_BASE_IDX 0
+#define mmCKSVII2C1_IC_TX_TL 0x008f
+#define mmCKSVII2C1_IC_TX_TL_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_INTR 0x0090
+#define mmCKSVII2C1_IC_CLR_INTR_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RX_UNDER 0x0091
+#define mmCKSVII2C1_IC_CLR_RX_UNDER_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RX_OVER 0x0092
+#define mmCKSVII2C1_IC_CLR_RX_OVER_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_TX_OVER 0x0093
+#define mmCKSVII2C1_IC_CLR_TX_OVER_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RD_REQ 0x0094
+#define mmCKSVII2C1_IC_CLR_RD_REQ_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_TX_ABRT 0x0095
+#define mmCKSVII2C1_IC_CLR_TX_ABRT_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RX_DONE 0x0096
+#define mmCKSVII2C1_IC_CLR_RX_DONE_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_ACTIVITY 0x0097
+#define mmCKSVII2C1_IC_CLR_ACTIVITY_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_STOP_DET 0x0098
+#define mmCKSVII2C1_IC_CLR_STOP_DET_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_START_DET 0x0099
+#define mmCKSVII2C1_IC_CLR_START_DET_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_GEN_CALL 0x009a
+#define mmCKSVII2C1_IC_CLR_GEN_CALL_BASE_IDX 0
+#define mmCKSVII2C1_IC_ENABLE 0x009b
+#define mmCKSVII2C1_IC_ENABLE_BASE_IDX 0
+#define mmCKSVII2C1_IC_STATUS 0x009c
+#define mmCKSVII2C1_IC_STATUS_BASE_IDX 0
+#define mmCKSVII2C1_IC_TXFLR 0x009d
+#define mmCKSVII2C1_IC_TXFLR_BASE_IDX 0
+#define mmCKSVII2C1_IC_RXFLR 0x009e
+#define mmCKSVII2C1_IC_RXFLR_BASE_IDX 0
+#define mmCKSVII2C1_IC_SDA_HOLD 0x009f
+#define mmCKSVII2C1_IC_SDA_HOLD_BASE_IDX 0
+#define mmCKSVII2C1_IC_TX_ABRT_SOURCE 0x00a0
+#define mmCKSVII2C1_IC_TX_ABRT_SOURCE_BASE_IDX 0
+#define mmCKSVII2C1_IC_SLV_DATA_NACK_ONLY 0x00a1
+#define mmCKSVII2C1_IC_SLV_DATA_NACK_ONLY_BASE_IDX 0
+#define mmCKSVII2C1_IC_DMA_CR 0x00a2
+#define mmCKSVII2C1_IC_DMA_CR_BASE_IDX 0
+#define mmCKSVII2C1_IC_DMA_TDLR 0x00a3
+#define mmCKSVII2C1_IC_DMA_TDLR_BASE_IDX 0
+#define mmCKSVII2C1_IC_DMA_RDLR 0x00a4
+#define mmCKSVII2C1_IC_DMA_RDLR_BASE_IDX 0
+#define mmCKSVII2C1_IC_SDA_SETUP 0x00a5
+#define mmCKSVII2C1_IC_SDA_SETUP_BASE_IDX 0
+#define mmCKSVII2C1_IC_ACK_GENERAL_CALL 0x00a6
+#define mmCKSVII2C1_IC_ACK_GENERAL_CALL_BASE_IDX 0
+#define mmCKSVII2C1_IC_ENABLE_STATUS 0x00a7
+#define mmCKSVII2C1_IC_ENABLE_STATUS_BASE_IDX 0
+#define mmCKSVII2C1_IC_FS_SPKLEN 0x00a8
+#define mmCKSVII2C1_IC_FS_SPKLEN_BASE_IDX 0
+#define mmCKSVII2C1_IC_HS_SPKLEN 0x00a9
+#define mmCKSVII2C1_IC_HS_SPKLEN_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RESTART_DET 0x00aa
+#define mmCKSVII2C1_IC_CLR_RESTART_DET_BASE_IDX 0
+#define mmCKSVII2C1_IC_COMP_PARAM_1 0x00ab
+#define mmCKSVII2C1_IC_COMP_PARAM_1_BASE_IDX 0
+#define mmCKSVII2C1_IC_COMP_VERSION 0x00ac
+#define mmCKSVII2C1_IC_COMP_VERSION_BASE_IDX 0
+#define mmCKSVII2C1_IC_COMP_TYPE 0x00ad
+#define mmCKSVII2C1_IC_COMP_TYPE_BASE_IDX 0
#define mmSMUIO_MP_RESET_INTR 0x00c1
#define mmSMUIO_MP_RESET_INTR_BASE_IDX 0
#define mmSMUIO_SOC_HALT 0x00c2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h
index f8afa3518bf2..6905a9618127 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h
@@ -268,6 +268,182 @@
//CKSVII2C_IC_COMP_TYPE
#define CKSVII2C_IC_COMP_TYPE__COMP_TYPE__SHIFT 0x0
#define CKSVII2C_IC_COMP_TYPE__COMP_TYPE_MASK 0xFFFFFFFFL
+//CKSVII2C1_IC_CON
+#define CKSVII2C1_IC_CON__IC1_MASTER_MODE__SHIFT 0x0
+#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE__SHIFT 0x1
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE__SHIFT 0x3
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER__SHIFT 0x4
+#define CKSVII2C1_IC_CON__IC1_RESTART_EN__SHIFT 0x5
+#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE__SHIFT 0x6
+#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED__SHIFT 0x7
+#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL__SHIFT 0x8
+#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL__SHIFT 0x9
+#define CKSVII2C1_IC_CON__IC1_MASTER_MODE_MASK 0x00000001L
+#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE_MASK 0x00000006L
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE_MASK 0x00000008L
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER_MASK 0x00000010L
+#define CKSVII2C1_IC_CON__IC1_RESTART_EN_MASK 0x00000020L
+#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE_MASK 0x00000040L
+#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED_MASK 0x00000080L
+#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL_MASK 0x00000100L
+#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL_MASK 0x00000200L
+//CKSVII2C1_IC_TAR
+#define CKSVII2C1_IC_TAR__IC1_TAR__SHIFT 0x0
+#define CKSVII2C1_IC_TAR__GC1_OR_START__SHIFT 0xa
+#define CKSVII2C1_IC_TAR__SPECIAL1__SHIFT 0xb
+#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER__SHIFT 0xc
+#define CKSVII2C1_IC_TAR__IC1_TAR_MASK 0x000003FFL
+#define CKSVII2C1_IC_TAR__GC1_OR_START_MASK 0x00000400L
+#define CKSVII2C1_IC_TAR__SPECIAL1_MASK 0x00000800L
+#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER_MASK 0x00001000L
+//CKSVII2C1_IC_SAR
+#define CKSVII2C1_IC_SAR__IC1_SAR__SHIFT 0x0
+#define CKSVII2C1_IC_SAR__IC1_SAR_MASK 0x000003FFL
+//CKSVII2C1_IC_HS_MADDR
+#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR__SHIFT 0x0
+#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR_MASK 0x00000007L
+//CKSVII2C1_IC_DATA_CMD
+#define CKSVII2C1_IC_DATA_CMD__DAT1__SHIFT 0x0
+#define CKSVII2C1_IC_DATA_CMD__CMD1__SHIFT 0x8
+#define CKSVII2C1_IC_DATA_CMD__STOP1__SHIFT 0x9
+#define CKSVII2C1_IC_DATA_CMD__RESTART1__SHIFT 0xa
+#define CKSVII2C1_IC_DATA_CMD__DAT1_MASK 0x000000FFL
+#define CKSVII2C1_IC_DATA_CMD__CMD1_MASK 0x00000100L
+#define CKSVII2C1_IC_DATA_CMD__STOP1_MASK 0x00000200L
+#define CKSVII2C1_IC_DATA_CMD__RESTART1_MASK 0x00000400L
+//CKSVII2C1_IC_SS_SCL_HCNT
+#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_SS_SCL_LCNT
+#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_FS_SCL_HCNT
+#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_FS_SCL_LCNT
+#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_HS_SCL_HCNT
+#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_HS_SCL_LCNT
+#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_INTR_STAT
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER__SHIFT 0x0
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER__SHIFT 0x1
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL__SHIFT 0x2
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER__SHIFT 0x3
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ__SHIFT 0x5
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT__SHIFT 0x6
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE__SHIFT 0x7
+#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY__SHIFT 0x8
+#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET__SHIFT 0x9
+#define CKSVII2C1_IC_INTR_STAT__R1_START_DET__SHIFT 0xa
+#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL__SHIFT 0xb
+#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET__SHIFT 0xc
+#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER_MASK 0x00000002L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL_MASK 0x00000004L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER_MASK 0x00000008L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ_MASK 0x00000020L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE_MASK 0x00000080L
+#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET_MASK 0x00000200L
+#define CKSVII2C1_IC_INTR_STAT__R1_START_DET_MASK 0x00000400L
+#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C1_IC_INTR_MASK
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER__SHIFT 0x0
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER__SHIFT 0x1
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL__SHIFT 0x2
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER__SHIFT 0x3
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ__SHIFT 0x5
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT__SHIFT 0x6
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE__SHIFT 0x7
+#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY__SHIFT 0x8
+#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET__SHIFT 0x9
+#define CKSVII2C1_IC_INTR_MASK__M1_START_DET__SHIFT 0xa
+#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL__SHIFT 0xb
+#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET__SHIFT 0xc
+#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER_MASK 0x00000002L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL_MASK 0x00000004L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER_MASK 0x00000008L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ_MASK 0x00000020L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE_MASK 0x00000080L
+#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET_MASK 0x00000200L
+#define CKSVII2C1_IC_INTR_MASK__M1_START_DET_MASK 0x00000400L
+#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C1_IC_RAW_INTR_STAT
+//CKSVII2C1_IC_RX_TL
+//CKSVII2C1_IC_TX_TL
+//CKSVII2C1_IC_CLR_INTR
+//CKSVII2C1_IC_CLR_RX_UNDER
+//CKSVII2C1_IC_CLR_RX_OVER
+//CKSVII2C1_IC_CLR_TX_OVER
+//CKSVII2C1_IC_CLR_RD_REQ
+//CKSVII2C1_IC_CLR_TX_ABRT
+//CKSVII2C1_IC_CLR_RX_DONE
+//CKSVII2C1_IC_CLR_ACTIVITY
+//CKSVII2C1_IC_CLR_STOP_DET
+//CKSVII2C1_IC_CLR_START_DET
+//CKSVII2C1_IC_CLR_GEN_CALL
+//CKSVII2C1_IC_ENABLE
+#define CKSVII2C1_IC_ENABLE__ENABLE1__SHIFT 0x0
+#define CKSVII2C1_IC_ENABLE__ABORT1__SHIFT 0x1
+#define CKSVII2C1_IC_ENABLE__ENABLE1_MASK 0x00000001L
+#define CKSVII2C1_IC_ENABLE__ABORT1_MASK 0x00000002L
+//CKSVII2C1_IC_STATUS
+#define CKSVII2C1_IC_STATUS__ACTIVITY1__SHIFT 0x0
+#define CKSVII2C1_IC_STATUS__TFNF1__SHIFT 0x1
+#define CKSVII2C1_IC_STATUS__TFE1__SHIFT 0x2
+#define CKSVII2C1_IC_STATUS__RFNE1__SHIFT 0x3
+#define CKSVII2C1_IC_STATUS__RFF1__SHIFT 0x4
+#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY__SHIFT 0x5
+#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY__SHIFT 0x6
+#define CKSVII2C1_IC_STATUS__ACTIVITY1_MASK 0x00000001L
+#define CKSVII2C1_IC_STATUS__TFNF1_MASK 0x00000002L
+#define CKSVII2C1_IC_STATUS__TFE1_MASK 0x00000004L
+#define CKSVII2C1_IC_STATUS__RFNE1_MASK 0x00000008L
+#define CKSVII2C1_IC_STATUS__RFF1_MASK 0x00000010L
+#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY_MASK 0x00000020L
+#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY_MASK 0x00000040L
+//CKSVII2C1_IC_TXFLR
+//CKSVII2C1_IC_RXFLR
+//CKSVII2C1_IC_SDA_HOLD
+#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_HOLD__SHIFT 0x0
+#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_HOLD_MASK 0x00FFFFFFL
+//CKSVII2C1_IC_TX_ABRT_SOURCE
+//CKSVII2C1_IC_SLV_DATA_NACK_ONLY
+//CKSVII2C1_IC_DMA_CR
+//CKSVII2C1_IC_DMA_TDLR
+//CKSVII2C1_IC_DMA_RDLR
+//CKSVII2C1_IC_SDA_SETUP
+#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP__SHIFT 0x0
+#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP_MASK 0x000000FFL
+//CKSVII2C1_IC_ACK_GENERAL_CALL
+#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL__SHIFT 0x0
+#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL_MASK 0x00000001L
+//CKSVII2C1_IC_ENABLE_STATUS
+#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN__SHIFT 0x0
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_ABORTED__SHIFT 0x1
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_FIFO_FILLED_AND_FLUSHED__SHIFT 0x2
+#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN_MASK 0x00000001L
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_ABORTED_MASK 0x00000002L
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_FIFO_FILLED_AND_FLUSHED_MASK 0x00000004L
//SMUIO_MP_RESET_INTR
#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR__SHIFT 0x0
#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR_MASK 0x00000001L
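These smuio headers follow the usual amdgpu convention of paired __SHIFT/__MASK macros per register field. As a minimal sketch (the helper name is illustrative, not part of this patch), a driver decodes such a field like this:

static inline uint32_t smuio_get_field(uint32_t reg_val, uint32_t mask,
				       uint32_t shift)
{
	/* Isolate the field bits, then right-align them. */
	return (reg_val & mask) >> shift;
}

/* e.g. checking whether the second I2C controller is busy:
 *   busy = smuio_get_field(val, CKSVII2C1_IC_STATUS__ACTIVITY1_MASK,
 *                          CKSVII2C1_IC_STATUS__ACTIVITY1__SHIFT);
 */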
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index e88541d67aa0..dd7cbc00a0aa 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -492,12 +492,13 @@ struct atom_firmware_info_v3_1
/* Total 32bit cap indication */
enum atombios_firmware_capability
{
- ATOM_FIRMWARE_CAP_FIRMWARE_POSTED = 0x00000001,
- ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION = 0x00000002,
- ATOM_FIRMWARE_CAP_WMI_SUPPORT = 0x00000040,
- ATOM_FIRMWARE_CAP_HWEMU_ENABLE = 0x00000080,
- ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100,
- ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200,
+ ATOM_FIRMWARE_CAP_FIRMWARE_POSTED = 0x00000001,
+ ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION = 0x00000002,
+ ATOM_FIRMWARE_CAP_WMI_SUPPORT = 0x00000040,
+ ATOM_FIRMWARE_CAP_HWEMU_ENABLE = 0x00000080,
+ ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100,
+ ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200,
+ ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING = 0x00000400,
};
enum atom_cooling_solution_id{
@@ -671,6 +672,20 @@ struct vram_usagebyfirmware_v2_1
uint16_t used_by_driver_in_kb;
};
+/* This is part of vram_usagebyfirmware_v2_1 */
+struct vram_reserve_block
+{
+ uint32_t start_address_in_kb;
+ uint16_t used_by_firmware_in_kb;
+ uint16_t used_by_driver_in_kb;
+};
+
+/* Definitions for constants */
+enum atomfirmware_internal_constants
+{
+ ONE_KiB = 0x400,
+ ONE_MiB = 0x100000,
+};
/*
***************************************************************************
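The new ONE_KiB/ONE_MiB constants pair naturally with the *_in_kb fields of vram_reserve_block above. A hedged sketch of the conversion a consumer would perform (the variable names are assumptions; the scaling follows from the field names):

/* Scale the kilobyte-granularity firmware fields up to byte units. */
uint64_t start_bytes = (uint64_t)rsv_block->start_address_in_kb * ONE_KiB;
uint64_t fw_bytes    = (uint64_t)rsv_block->used_by_firmware_in_kb * ONE_KiB;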
diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h
index 5dcb776548d8..7ec4331e67f2 100644
--- a/drivers/gpu/drm/amd/include/discovery.h
+++ b/drivers/gpu/drm/amd/include/discovery.h
@@ -25,7 +25,6 @@
#define _DISCOVERY_H_
#define PSP_HEADER_SIZE 256
-#define BINARY_MAX_SIZE (64 << 10)
#define BINARY_SIGNATURE 0x28211407
#define DISCOVERY_TABLE_SIGNATURE 0x53445049
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h b/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h
new file mode 100644
index 000000000000..79af4258f259
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_NBIF_7_4_H__
+#define __IRQSRCS_NBIF_7_4_H__
+
+#define NBIF_7_4__SRCID__CHIP_ERR_INT_EVENT 0x5E // Error generated
+#define NBIF_7_4__SRCID__DOORBELL_INTERRUPT 0x5F // Interrupt for doorbell event during VDDGFX off
+#define NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT 0x60 // Interrupt for ras_intr_valid from RAS controller
+#define NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT 0x61 // Interrupt for SDP ErrEvent received from ATHUB
+#define NBIF_7_4__SRCID__PF_VF_MSGBUF_VALID 0x87 // Valid message in PF->VF mailbox message buffer (The interrupt is sent on behalf of PF)
+#define NBIF_7_4__SRCID__PF_VF_MSGBUF_ACK 0x88 // Acknowledge message in PF->VF mailbox message buffer (The interrupt is sent on behalf of VF)
+#define NBIF_7_4__SRCID__VF_PF_MSGBUF_VALID 0x89 // Valid message in VF->PF mailbox message buffer (The interrupt is sent on behalf of VF)
+#define NBIF_7_4__SRCID__VF_PF_MSGBUF_ACK 0x8A // Acknowledge message in VF->PF mailbox message buffer (The interrupt is sent on behalf of PF)
+#define NBIF_7_4__SRCID__CHIP_DPA_INT_EVENT 0xA0 // BIF_CHIP_DPA_INT_EVENT
+#define NBIF_7_4__SRCID__CHIP_SLOT_POWER_CHG_INT_EVENT 0xA1 // BIF_CHIP_SLOT_POWER_CHG_INT_EVENT
+#define NBIF_7_4__SRCID__ATOMIC_UR_OPCODE		0xCE	// BIF receives an unsupported atomic opcode from MC
+#define NBIF_7_4__SRCID__ATOMIC_REQESTEREN_LOW		0xCF	// BIF receives an atomic request from MC while AtomicOp Requester is not enabled in PCIE config space
+
+#endif // __IRQSRCS_NBIF_7_4_H__
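A hedged sketch of how one of these source ids is hooked up through the existing amdgpu_irq_add_id() helper; the client id and the irq source object are assumptions for illustration, not part of this header:

/* Register a handler for the new RAS controller interrupt line. */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
		      NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
		      &ras_controller_irq);	/* struct amdgpu_irq_src */
if (r)
	return r;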
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 98b9533e672b..2cd217e60125 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -291,15 +291,18 @@ struct kfd2kgd_calls {
uint32_t (*address_watch_get_offset)(struct kgd_dev *kgd,
unsigned int watch_point_id,
unsigned int reg_offset);
- bool (*get_atc_vmid_pasid_mapping_valid)(
+ bool (*get_atc_vmid_pasid_mapping_info)(
struct kgd_dev *kgd,
- uint8_t vmid);
- uint16_t (*get_atc_vmid_pasid_mapping_pasid)(
- struct kgd_dev *kgd,
- uint8_t vmid);
+ uint8_t vmid,
+ uint16_t *p_pasid);
+	/* No longer needed from GFXv9 onward. The scratch base address is
+	 * passed to the shader by the CP; setting it up is now the user
+	 * mode driver's responsibility.
+	 */
void (*set_scratch_backing_va)(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
+
int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
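The two old callbacks are merged into one that reports both pieces of information at once; a usage sketch derived directly from the new signature (handle_pasid() is hypothetical):

uint16_t pasid;

/* Returns true when the vmid->pasid mapping is valid; the pasid itself
 * is written through the out-parameter. */
if (kfd2kgd->get_atc_vmid_pasid_mapping_info(kgd, vmid, &pasid))
	handle_pasid(pasid);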
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 27cf0afaa0b4..5902f80d1fce 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -179,6 +179,11 @@ enum pp_mp1_state {
PP_MP1_STATE_RESET,
};
+enum pp_df_cstate {
+ DF_CSTATE_DISALLOW = 0,
+ DF_CSTATE_ALLOW,
+};
+
#define PP_GROUP_MASK 0xF0000000
#define PP_GROUP_SHIFT 28
@@ -312,6 +317,7 @@ struct amd_pm_funcs {
int (*get_ppfeature_status)(void *handle, char *buf);
int (*set_ppfeature_status)(void *handle, uint64_t ppfeature_masks);
int (*asic_reset_mode_2)(void *handle);
+ int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
};
#endif
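A sketch of how the new hook is reached through the amd_pm_funcs table; the powerplay handle and funcs pointers are the existing amdgpu fields, but this particular call site is an assumption:

const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

/* Allow the data fabric to enter its C-state again. */
if (pp_funcs && pp_funcs->set_df_cstate)
	pp_funcs->set_df_cstate(adev->powerplay.pp_handle, DF_CSTATE_ALLOW);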
diff --git a/drivers/gpu/drm/amd/include/renoir_ip_offset.h b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
index 094648cac392..07633e22e99a 100644
--- a/drivers/gpu/drm/amd/include/renoir_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
@@ -169,6 +169,11 @@ static const struct IP_BASE NBIF0_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DCN_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE OSSSYS_BASE ={ { { { 0x000010A0, 0x0240A000, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
@@ -1361,4 +1366,33 @@ static const struct IP_BASE UVD0_BASE ={ { { { 0x00007800, 0x00007E00, 0x0240300
#define UVD0_BASE__INST6_SEG3 0
#define UVD0_BASE__INST6_SEG4 0
+#define DCN_BASE__INST0_SEG0 0x00000012
+#define DCN_BASE__INST0_SEG1 0x000000C0
+#define DCN_BASE__INST0_SEG2 0x000034C0
+#define DCN_BASE__INST0_SEG3 0
+#define DCN_BASE__INST0_SEG4 0
+
+#define DCN_BASE__INST1_SEG0 0
+#define DCN_BASE__INST1_SEG1 0
+#define DCN_BASE__INST1_SEG2 0
+#define DCN_BASE__INST1_SEG3 0
+#define DCN_BASE__INST1_SEG4 0
+
+#define DCN_BASE__INST2_SEG0 0
+#define DCN_BASE__INST2_SEG1 0
+#define DCN_BASE__INST2_SEG2 0
+#define DCN_BASE__INST2_SEG3 0
+#define DCN_BASE__INST2_SEG4 0
+
+#define DCN_BASE__INST3_SEG0 0
+#define DCN_BASE__INST3_SEG1 0
+#define DCN_BASE__INST3_SEG2 0
+#define DCN_BASE__INST3_SEG3 0
+#define DCN_BASE__INST3_SEG4 0
+
+#define DCN_BASE__INST4_SEG0 0
+#define DCN_BASE__INST4_SEG1 0
+#define DCN_BASE__INST4_SEG2 0
+#define DCN_BASE__INST4_SEG3 0
+#define DCN_BASE__INST4_SEG4 0
#endif
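These per-instance segment bases feed amdgpu's standard SOC15 register addressing; for reference, the existing lookup pattern (from soc15_common.h) that consumes them looks like:

#define SOC15_REG_OFFSET(ip, inst, reg) \
	(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)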
diff --git a/drivers/gpu/drm/amd/include/vega10_enum.h b/drivers/gpu/drm/amd/include/vega10_enum.h
index c14ba65a2415..adf1b754666e 100644
--- a/drivers/gpu/drm/amd/include/vega10_enum.h
+++ b/drivers/gpu/drm/amd/include/vega10_enum.h
@@ -1037,6 +1037,7 @@ TCC_CACHE_POLICY_STREAM = 0x00000001,
typedef enum MTYPE {
MTYPE_NC = 0x00000000,
MTYPE_WC = 0x00000001,
+MTYPE_RW = 0x00000001,
MTYPE_CC = 0x00000002,
MTYPE_UC = 0x00000003,
} MTYPE;
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index fa8ad7db2b3a..f4ff15378e61 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -1421,6 +1421,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
{
struct pp_hwmgr *hwmgr = handle;
+ *cap = false;
if (!hwmgr)
return -EINVAL;
@@ -1548,6 +1549,23 @@ static int pp_smu_i2c_bus_access(void *handle, bool acquire)
return ret;
}
+static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr)
+ return -EINVAL;
+
+ if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
+ return 0;
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
static const struct amd_pm_funcs pp_dpm_funcs = {
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1606,4 +1624,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.set_ppfeature_status = pp_set_ppfeature_status,
.asic_reset_mode_2 = pp_asic_reset_mode_2,
.smu_i2c_bus_access = pp_smu_i2c_bus_access,
+ .set_df_cstate = pp_set_df_cstate,
};
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 4acf139ea014..ee374df32b19 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -95,6 +95,52 @@ failed:
return size;
}
+static int smu_feature_update_enable_state(struct smu_context *smu,
+ uint64_t feature_mask,
+ bool enabled)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ uint32_t feature_low = 0, feature_high = 0;
+ int ret = 0;
+
+ if (!smu->pm_enabled)
+ return ret;
+
+	feature_low = (feature_mask >> 0) & 0xffffffff;
+ feature_high = (feature_mask >> 32) & 0xffffffff;
+
+ if (enabled) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
+ feature_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
+ feature_high);
+ if (ret)
+ return ret;
+ } else {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
+ feature_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
+ feature_high);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&feature->mutex);
+ if (enabled)
+ bitmap_or(feature->enabled, feature->enabled,
+ (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
+ else
+ bitmap_andnot(feature->enabled, feature->enabled,
+ (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
{
int ret = 0;
@@ -159,8 +205,7 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max)
{
- int ret = 0, clk_id = 0;
- uint32_t param;
+ int ret = 0;
if (min <= 0 && max <= 0)
return -EINVAL;
@@ -168,27 +213,7 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (!smu_clk_dpm_is_enabled(smu, clk_type))
return 0;
- clk_id = smu_clk_get_index(smu, clk_type);
- if (clk_id < 0)
- return clk_id;
-
- if (max > 0) {
- param = (uint32_t)((clk_id << 16) | (max & 0xffff));
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
- param);
- if (ret)
- return ret;
- }
-
- if (min > 0) {
- param = (uint32_t)((clk_id << 16) | (min & 0xffff));
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
- param);
- if (ret)
- return ret;
- }
-
-
+ ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
return ret;
}
@@ -439,7 +464,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
int ret = 0;
int table_id = smu_table_get_index(smu, table_index);
- if (!table_data || table_id >= smu_table->table_count || table_id < 0)
+ if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
table = &smu_table->tables[table_index];
@@ -463,7 +488,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
return ret;
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
if (!drv2smu)
memcpy(table_data, table->cpu_addr, table->size);
@@ -569,41 +594,7 @@ int smu_feature_init_dpm(struct smu_context *smu)
return ret;
}
-int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled)
-{
- uint32_t feature_low = 0, feature_high = 0;
- int ret = 0;
-
- if (!smu->pm_enabled)
- return ret;
-
- feature_low = (feature_mask >> 0 ) & 0xffffffff;
- feature_high = (feature_mask >> 32) & 0xffffffff;
-
- if (enabled) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
- feature_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
- feature_high);
- if (ret)
- return ret;
-
- } else {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
- feature_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
- feature_high);
- if (ret)
- return ret;
- }
-
- return ret;
-}
int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
@@ -633,8 +624,6 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
{
struct smu_feature *feature = &smu->smu_feature;
int feature_id;
- uint64_t feature_mask = 0;
- int ret = 0;
feature_id = smu_feature_get_index(smu, mask);
if (feature_id < 0)
@@ -642,22 +631,9 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
WARN_ON(feature_id > feature->feature_num);
- feature_mask = 1ULL << feature_id;
-
- mutex_lock(&feature->mutex);
- ret = smu_feature_update_enable_state(smu, feature_mask, enable);
- if (ret)
- goto failed;
-
- if (enable)
- test_and_set_bit(feature_id, feature->enabled);
- else
- test_and_clear_bit(feature_id, feature->enabled);
-
-failed:
- mutex_unlock(&feature->mutex);
-
- return ret;
+ return smu_feature_update_enable_state(smu,
+ 1ULL << feature_id,
+ enable);
}
int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
@@ -736,6 +712,7 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
+ smu->is_apu = false;
mutex_init(&smu->mutex);
return smu_set_funcs(adev);
@@ -919,14 +896,9 @@ static int smu_init_fb_allocations(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
- uint32_t table_count = smu_table->table_count;
- uint32_t i = 0;
- int32_t ret = 0;
-
- if (table_count <= 0)
- return -EINVAL;
+ int ret, i;
- for (i = 0 ; i < table_count; i++) {
+ for (i = 0; i < SMU_TABLE_COUNT; i++) {
if (tables[i].size == 0)
continue;
ret = amdgpu_bo_create_kernel(adev,
@@ -942,7 +914,7 @@ static int smu_init_fb_allocations(struct smu_context *smu)
return 0;
failed:
- for (; i > 0; i--) {
+ while (--i >= 0) {
if (tables[i].size == 0)
continue;
amdgpu_bo_free_kernel(&tables[i].bo,
@@ -957,13 +929,12 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
- uint32_t table_count = smu_table->table_count;
uint32_t i = 0;
- if (table_count == 0 || tables == NULL)
+ if (!tables)
return 0;
- for (i = 0 ; i < table_count; i++) {
+ for (i = 0; i < SMU_TABLE_COUNT; i++) {
if (tables[i].size == 0)
continue;
amdgpu_bo_free_kernel(&tables[i].bo,
@@ -974,50 +945,6 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
return 0;
}
-static int smu_override_pcie_parameters(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
- int ret;
-
- if (adev->flags & AMD_IS_APU)
- return 0;
-
- if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
- pcie_gen = 3;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- pcie_gen = 2;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
- pcie_gen = 1;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
- pcie_gen = 0;
-
- /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
- * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
- * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32
- */
- if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
- pcie_width = 6;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
- pcie_width = 5;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
- pcie_width = 4;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
- pcie_width = 3;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
- pcie_width = 2;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
- pcie_width = 1;
-
- smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
- ret = smu_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg);
- if (ret)
- pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
- return ret;
-}
-
static int smu_smc_table_hw_init(struct smu_context *smu,
bool initialize)
{
@@ -1092,8 +1019,8 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
if (ret)
return ret;
- /* issue RunAfllBtc msg */
- ret = smu_run_afll_btc(smu);
+ /* issue Run*Btc msg */
+ ret = smu_run_btc(smu);
if (ret)
return ret;
@@ -1226,11 +1153,10 @@ static int smu_free_memory_pool(struct smu_context *smu)
return ret;
}
-static int smu_hw_init(void *handle)
+static int smu_start_smc_engine(struct smu_context *smu)
{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
if (adev->asic_type < CHIP_NAVI10) {
@@ -1241,14 +1167,28 @@ static int smu_hw_init(void *handle)
}
ret = smu_check_fw_status(smu);
+ if (ret)
+ pr_err("SMC is not ready\n");
+
+ return ret;
+}
+
+static int smu_hw_init(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+
+ ret = smu_start_smc_engine(smu);
if (ret) {
- pr_err("SMC firmware status is not correct\n");
+ pr_err("SMU is not ready yet!\n");
return ret;
}
if (adev->flags & AMD_IS_APU) {
smu_powergate_sdma(&adev->smu, false);
smu_powergate_vcn(&adev->smu, false);
+ smu_set_gfx_cgpg(&adev->smu, true);
}
if (!smu->pm_enabled)
@@ -1291,6 +1231,11 @@ failed:
return ret;
}
+static int smu_stop_dpms(struct smu_context *smu)
+{
+ return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures);
+}
+
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1303,6 +1248,18 @@ static int smu_hw_fini(void *handle)
smu_powergate_vcn(&adev->smu, true);
}
+ ret = smu_stop_thermal_control(smu);
+ if (ret) {
+		pr_warn("Failed to stop thermal control!\n");
+ return ret;
+ }
+
+ ret = smu_stop_dpms(smu);
+ if (ret) {
+		pr_warn("Failed to stop DPMs!\n");
+ return ret;
+ }
+
kfree(table_context->driver_pptable);
table_context->driver_pptable = NULL;
@@ -1344,7 +1301,10 @@ static int smu_suspend(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
- bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
+ bool baco_feature_is_enabled = false;
+
+	if (!(adev->flags & AMD_IS_APU))
+ baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
ret = smu_system_features_control(smu, false);
if (ret)
@@ -1377,6 +1337,12 @@ static int smu_resume(void *handle)
mutex_lock(&smu->mutex);
+ ret = smu_start_smc_engine(smu);
+ if (ret) {
+ pr_err("SMU is not ready yet!\n");
+ goto failed;
+ }
+
ret = smu_smc_table_hw_init(smu, false);
if (ret)
goto failed;
@@ -1385,6 +1351,11 @@ static int smu_resume(void *handle)
if (ret)
goto failed;
+ if (smu->is_apu)
+ smu_set_gfx_cgpg(&adev->smu, true);
+
+ smu->disable_uclk_switch = 0;
+
mutex_unlock(&smu->mutex);
pr_info("SMU is resumed successfully!\n");
@@ -1529,7 +1500,8 @@ static int smu_enable_umd_pstate(void *handle,
struct smu_context *smu = (struct smu_context*)(handle);
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
+
+ if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
return -EINVAL;
if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
@@ -1727,7 +1699,7 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
enum amd_dpm_forced_level level;
- if (!smu_dpm_ctx->dpm_context)
+ if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
mutex_lock(&(smu->mutex));
@@ -1742,7 +1714,7 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
int ret = 0;
- if (!smu_dpm_ctx->dpm_context)
+ if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
ret = smu_enable_umd_pstate(smu, &level);
@@ -1766,6 +1738,122 @@ int smu_set_display_count(struct smu_context *smu, uint32_t count)
return ret;
}
+int smu_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t mask)
+{
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ int ret = 0;
+
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+ pr_debug("force clock level is for dpm manual mode only.\n");
+ return -EINVAL;
+ }
+
+ if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
+ ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
+
+ return ret;
+}
+
+int smu_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state)
+{
+ uint16_t msg;
+ int ret;
+
+ /*
+ * The SMC is not fully ready. That may be
+ * expected as the IP may be masked.
+ * So, just return without error.
+ */
+ if (!smu->pm_enabled)
+ return 0;
+
+ switch (mp1_state) {
+ case PP_MP1_STATE_SHUTDOWN:
+ msg = SMU_MSG_PrepareMp1ForShutdown;
+ break;
+ case PP_MP1_STATE_UNLOAD:
+ msg = SMU_MSG_PrepareMp1ForUnload;
+ break;
+ case PP_MP1_STATE_RESET:
+ msg = SMU_MSG_PrepareMp1ForReset;
+ break;
+ case PP_MP1_STATE_NONE:
+ default:
+ return 0;
+ }
+
+ /* some asics may not support those messages */
+ if (smu_msg_get_index(smu, msg) < 0)
+ return 0;
+
+ ret = smu_send_smc_msg(smu, msg);
+ if (ret)
+ pr_err("[PrepareMp1] Failed!\n");
+
+ return ret;
+}
+
+int smu_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ int ret = 0;
+
+ /*
+ * The SMC is not fully ready. That may be
+ * expected as the IP may be masked.
+ * So, just return without error.
+ */
+ if (!smu->pm_enabled)
+ return 0;
+
+ if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
+ return 0;
+
+ ret = smu->ppt_funcs->set_df_cstate(smu, state);
+ if (ret)
+ pr_err("[SetDfCstate] failed!\n");
+
+ return ret;
+}
+
+int smu_write_watermarks_table(struct smu_context *smu)
+{
+ int ret = 0;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = NULL;
+
+ table = &smu_table->tables[SMU_TABLE_WATERMARKS];
+
+ if (!table->cpu_addr)
+ return -EINVAL;
+
+ ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
+ true);
+
+ return ret;
+}
+
+int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
+{
+ int ret = 0;
+ struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
+ void *table = watermarks->cpu_addr;
+
+ if (!smu->disable_watermark &&
+ smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+ smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ smu_set_watermarks_table(smu, table, clock_ranges);
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+ smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+ }
+
+ return ret;
+}
+
const struct amd_ip_funcs smu_ip_funcs = {
.name = "smu",
.early_init = smu_early_init,
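The relocated smu_feature_update_enable_state() splits the 64-bit mask into two 32-bit message parameters by shifting and masking. With the kernel's own helpers the same split reads as below; this is a sketch of the equivalent, not what the patch uses:

#include <linux/kernel.h>	/* lower_32_bits() / upper_32_bits() */

uint32_t feature_low  = lower_32_bits(feature_mask);
uint32_t feature_high = upper_32_bits(feature_mask);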
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index d493a3f8c07a..90d871af8e58 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -112,8 +112,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(PrepareMp1ForShutdown, PPSMC_MSG_PrepareMp1ForShutdown),
MSG_MAP(SoftReset, PPSMC_MSG_SoftReset),
MSG_MAP(RunAfllBtc, PPSMC_MSG_RunAfllBtc),
- MSG_MAP(RunGfxDcBtc, PPSMC_MSG_RunGfxDcBtc),
- MSG_MAP(RunSocDcBtc, PPSMC_MSG_RunSocDcBtc),
+ MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow),
MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize),
@@ -528,9 +527,17 @@ static int arcturus_append_powerplay_table(struct smu_context *smu)
return 0;
}
-static int arcturus_run_btc_afll(struct smu_context *smu)
+static int arcturus_run_btc(struct smu_context *smu)
{
- return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+ int ret = 0;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+ if (ret) {
+ pr_err("RunAfllBtc failed!\n");
+ return ret;
+ }
+
+ return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc);
}
static int arcturus_populate_umd_state_clk(struct smu_context *smu)
@@ -1891,6 +1898,35 @@ static bool arcturus_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
+static int arcturus_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (enable) {
+ if (!smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 1);
+ if (ret) {
+ pr_err("[EnableVCNDPM] failed!\n");
+ return ret;
+ }
+ }
+ power_gate->vcn_gated = false;
+ } else {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);
+ if (ret) {
+ pr_err("[DisableVCNDPM] failed!\n");
+ return ret;
+ }
+ }
+ power_gate->vcn_gated = true;
+ }
+
+ return ret;
+}
+
static const struct pptable_funcs arcturus_ppt_funcs = {
/* translate smu index into arcturus specific index */
.get_smu_msg_index = arcturus_get_smu_msg_index,
@@ -1909,7 +1945,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = arcturus_get_allowed_feature_mask,
/* btc */
- .run_afll_btc = arcturus_run_btc_afll,
+ .run_btc = arcturus_run_btc,
/* dpm/clk tables */
.set_default_dpm_table = arcturus_set_default_dpm_table,
.populate_umd_state_clk = arcturus_populate_umd_state_clk,
@@ -1929,12 +1965,10 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.dump_pptable = arcturus_dump_pptable,
.get_power_limit = arcturus_get_power_limit,
.is_dpm_running = arcturus_is_dpm_running,
+ .dpm_set_uvd_enable = arcturus_dpm_set_uvd_enable,
};
void arcturus_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &arcturus_ppt_funcs;
- smu_table->table_count = TABLE_COUNT;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index cc63705920dc..2773966ae434 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -36,7 +36,8 @@ HARDWARE_MGR = hwmgr.o processpptables.o \
pp_overdriver.o smu_helper.o \
vega20_processpptables.o vega20_hwmgr.o vega20_powertune.o \
vega20_thermal.o common_baco.o vega10_baco.o vega20_baco.o \
- vega12_baco.o smu9_baco.o
+ vega12_baco.o smu9_baco.o tonga_baco.o polaris_baco.o fiji_baco.o \
+ ci_baco.o smu7_baco.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c
new file mode 100644
index 000000000000..3be40114e63d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "ci_baco.h"
+
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_sh_mask.h"
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmDISPPLL_BG_CNTL, DISPPLL_BG_CNTL__DISPPLL_BG_PDN_MASK, DISPPLL_BG_CNTL__DISPPLL_BG_PDN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_DC },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__OSC_EN_MASK, CG_CLKPIN_CNTL_DC__OSC_EN__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__XTALIN_SEL_MASK, CG_CLKPIN_CNTL_DC__XTALIN_SEL__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_RESET_MASK, PLL_CNTL__PLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_POWER_DOWN_MASK, PLL_CNTL__PLL_POWER_DOWN__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_BYPASS_CAL_MASK, PLL_CNTL__PLL_BYPASS_CAL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmMPLL_CONTROL, 0, 0, 0, 0x00000006 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D1, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D1, 0, 0, 0, 0x00007740 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PU_MASK, MC_SEQ_CNTL_2__DRST_PU__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PD_MASK, MC_SEQ_CNTL_2__DRST_PD__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK, MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x2 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_DELAY_MS, 0, 0, 0, 20, 0 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x20 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x10 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmCP_PFP_UCODE_ADDR, 0, 0, 0, 0 }
+};
+
+int ci_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* asic already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h
new file mode 100644
index 000000000000..17041f187020
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __CI_BACO_H__
+#define __CI_BACO_H__
+#include "smu7_baco.h"
+
+extern int ci_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
index 9c57c1f67749..1c73776bd606 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
@@ -79,6 +79,25 @@ static bool baco_cmd_handler(struct pp_hwmgr *hwmgr, u32 command, u32 reg, u32 m
return ret;
}
+bool baco_program_registers(struct pp_hwmgr *hwmgr,
+ const struct baco_cmd_entry *entry,
+ const u32 array_size)
+{
+ u32 i, reg = 0;
+
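+ /*
+ * Only CMD_WRITE, CMD_READMODIFYWRITE and CMD_WAITFOR update 'reg';
+ * CMD_DELAY_MS/CMD_DELAY_US entries reuse the offset of the most
+ * recent register command in the table.
+ */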
+ for (i = 0; i < array_size; i++) {
+ if ((entry[i].cmd == CMD_WRITE) ||
+ (entry[i].cmd == CMD_READMODIFYWRITE) ||
+ (entry[i].cmd == CMD_WAITFOR))
+ reg = entry[i].reg_offset;
+ if (!baco_cmd_handler(hwmgr, entry[i].cmd, reg, entry[i].mask,
+ entry[i].shift, entry[i].val, entry[i].timeout))
+ return false;
+ }
+
+ return true;
+}
+
bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr,
const struct soc15_baco_cmd_entry *entry,
const u32 array_size)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
index 95296c916f4e..8393eb62706d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
@@ -33,6 +33,15 @@ enum baco_cmd_type {
CMD_DELAY_US,
};
+struct baco_cmd_entry {
+ enum baco_cmd_type cmd;
+ uint32_t reg_offset;
+ uint32_t mask;
+ uint32_t shift;
+ uint32_t timeout;
+ uint32_t val;
+};
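+
+/*
+ * Illustrative sketch only (the register and field names below are
+ * placeholders, not part of this patch): a table entry that sets a
+ * single field via read-modify-write would look like
+ *
+ *	static const struct baco_cmd_entry example_tbl[] = {
+ *		{ CMD_READMODIFYWRITE, mmEXAMPLE_CNTL,
+ *		  EXAMPLE_CNTL__FIELD_MASK, EXAMPLE_CNTL__FIELD__SHIFT,
+ *		  0, 0x1 },
+ *	};
+ */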
+
struct soc15_baco_cmd_entry {
enum baco_cmd_type cmd;
uint32_t hwip;
@@ -44,6 +53,10 @@ struct soc15_baco_cmd_entry {
uint32_t timeout;
uint32_t val;
};
+
+extern bool baco_program_registers(struct pp_hwmgr *hwmgr,
+ const struct baco_cmd_entry *entry,
+ const u32 array_size);
extern bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr,
const struct soc15_baco_cmd_entry *entry,
const u32 array_size);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c
new file mode 100644
index 000000000000..c0368f2dfb21
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "fiji_baco.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry clk_req_b_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_0, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_1, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_2, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_3, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_4, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_5, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_8, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_9, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_10, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_11, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_12, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_13, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_14, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_15, 0, 0, 0, 0 }
+};
+
+int fiji_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* asic already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ baco_program_registers(hwmgr, clk_req_b_tbl, ARRAY_SIZE(clk_req_b_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h
new file mode 100644
index 000000000000..47f402900bdb
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __FIJI_BACO_H__
+#define __FIJI_BACO_H__
+#include "smu7_baco.h"
+
+extern int fiji_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c
new file mode 100644
index 000000000000..8f8e296f2fe9
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "polaris_baco.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixGCK_DFS_BYPASS_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK, GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 },
+ { CMD_DELAY_US, 0, 0, 0, 1, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC05002B0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC050032C },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500080 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_READMODIFYWRITE, 0xda2, 0x40, 0x6, 0, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_READMODIFYWRITE, 0xda2, 0x8, 0x3, 0, 0x0 },
+ { CMD_READMODIFYWRITE, 0xda2, 0x3fff00, 0x8, 0, 0x32 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_READMODIFYWRITE, mmMPLL_FUNC_CNTL_2, MPLL_FUNC_CNTL_2__ISO_DIS_P_MASK, MPLL_FUNC_CNTL_2__ISO_DIS_P__SHIFT, 0, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 5, 0x0 }
+};
+
+static const struct baco_cmd_entry clk_req_b_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl_vg[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixGCK_DFS_BYPASS_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK, GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl_vg[] =
+{
+ { CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 },
+ { CMD_DELAY_US, 0, 0, 0, 1, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC05002B0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC050032C },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500080 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 5, 0x0 }
+};
+
+int polaris_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* asic already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ if (hwmgr->chip_id == CHIP_VEGAM) {
+ baco_program_registers(hwmgr, use_bclk_tbl_vg, ARRAY_SIZE(use_bclk_tbl_vg));
+ baco_program_registers(hwmgr, turn_off_plls_tbl_vg,
+ ARRAY_SIZE(turn_off_plls_tbl_vg));
+ } else {
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ }
+ baco_program_registers(hwmgr, clk_req_b_tbl, ARRAY_SIZE(clk_req_b_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h
new file mode 100644
index 000000000000..87a5fa0a157a
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __POLARIS_BACO_H__
+#define __POLARIS_BACO_H__
+#include "smu7_baco.h"
+
+extern int polaris_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c
new file mode 100644
index 000000000000..044cda005aed
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "smu7_baco.h"
+#include "tonga_baco.h"
+#include "fiji_baco.h"
+#include "polaris_baco.h"
+#include "ci_baco.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
+
+int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ uint32_t reg;
+
+ *cap = false;
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
+ return 0;
+
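+ /* BACO is only reported as available on parts fused as PX capable */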
+ reg = RREG32(mmCC_BIF_BX_FUSESTRAP0);
+
+ if (reg & CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK)
+ *cap = true;
+
+ return 0;
+}
+
+int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ uint32_t reg;
+
+ reg = RREG32(mmBACO_CNTL);
+
+ if (reg & BACO_CNTL__BACO_MODE_MASK)
+ /* gfx has already entered BACO state */
+ *state = BACO_STATE_IN;
+ else
+ *state = BACO_STATE_OUT;
+ return 0;
+}
+
+int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ case CHIP_TONGA:
+ return tonga_baco_set_state(hwmgr, state);
+ case CHIP_FIJI:
+ return fiji_baco_set_state(hwmgr, state);
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ return polaris_baco_set_state(hwmgr, state);
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ return ci_baco_set_state(hwmgr, state);
+#endif
+ default:
+ return -EINVAL;
+ }
+}
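+
+/*
+ * Illustrative only, not part of this patch: a caller driving a full
+ * BACO reset cycle through this dispatcher would do something like
+ *
+ *	smu7_baco_set_state(hwmgr, BACO_STATE_IN);
+ *	smu7_baco_set_state(hwmgr, BACO_STATE_OUT);
+ *
+ * with the required 20ms regulator-off dwell handled by the per-ASIC
+ * exit sequences.
+ */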
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h
new file mode 100644
index 000000000000..be0d98abb536
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU7_BACO_H__
+#define __SMU7_BACO_H__
+#include "hwmgr.h"
+#include "common_baco.h"
+
+extern int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
+extern int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
+extern int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 34f95e0e3ea4..c805c6fcdba2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -48,6 +48,7 @@
#include "smu7_clockpowergating.h"
#include "processpptables.h"
#include "pp_thermal.h"
+#include "smu7_baco.h"
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -1994,7 +1995,6 @@ static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_voltage_lookup_table *lookup_table)
{
uint32_t table_size, i, j;
- struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
table_size = lookup_table->count;
PP_ASSERT_WITH_CODE(0 != lookup_table->count,
@@ -2005,9 +2005,8 @@ static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
for (j = i + 1; j > 0; j--) {
if (lookup_table->entries[j].us_vdd <
lookup_table->entries[j - 1].us_vdd) {
- tmp_voltage_lookup_record = lookup_table->entries[j - 1];
- lookup_table->entries[j - 1] = lookup_table->entries[j];
- lookup_table->entries[j] = tmp_voltage_lookup_record;
+ swap(lookup_table->entries[j - 1],
+ lookup_table->entries[j]);
}
}
}
@@ -5145,6 +5144,9 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.get_power_profile_mode = smu7_get_power_profile_mode,
.set_power_profile_mode = smu7_set_power_profile_mode,
.get_performance_level = smu7_get_performance_level,
+ .get_asic_baco_capability = smu7_baco_get_capability,
+ .get_asic_baco_state = smu7_baco_get_state,
+ .set_asic_baco_state = smu7_baco_set_state,
.power_off_asic = smu7_power_off_asic,
};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c
new file mode 100644
index 000000000000..ea743bea8e29
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "tonga_baco.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
+
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmMPLL_CONTROL, 0, 0, 0, 0x00000006 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D1, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D1, 0, 0, 0, 0x00007740 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PU_MASK, MC_SEQ_CNTL_2__DRST_PU__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PD_MASK, MC_SEQ_CNTL_2__DRST_PD__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+static const struct baco_cmd_entry gpio_tbl_iceland[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }
+};
+
+static const struct baco_cmd_entry exit_baco_tbl_iceland[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_DELAY_MS, 0, 0, 0, 20, 0 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl_iceland[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+int tonga_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* asic already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ if (hwmgr->chip_id == CHIP_TOPAZ)
+ baco_program_registers(hwmgr, gpio_tbl_iceland, ARRAY_SIZE(gpio_tbl_iceland));
+ else
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (hwmgr->chip_id == CHIP_TOPAZ) {
+ if (baco_program_registers(hwmgr, exit_baco_tbl_iceland,
+ ARRAY_SIZE(exit_baco_tbl_iceland))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl_iceland,
+ ARRAY_SIZE(clean_baco_tbl_iceland)))
+ return 0;
+ }
+ } else {
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h
new file mode 100644
index 000000000000..5dc16cc8a295
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __TONGA_BACO_H__
+#define __TONGA_BACO_H__
+#include "smu7_baco.h"
+
+extern int tonga_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index d08493b67b67..f5dcba44f74a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -712,7 +712,6 @@ static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_voltage_lookup_table *lookup_table)
{
uint32_t table_size, i, j;
- struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
"Lookup table is empty", return -EINVAL);
@@ -724,9 +723,8 @@ static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
for (j = i + 1; j > 0; j--) {
if (lookup_table->entries[j].us_vdd <
lookup_table->entries[j - 1].us_vdd) {
- tmp_voltage_lookup_record = lookup_table->entries[j - 1];
- lookup_table->entries[j - 1] = lookup_table->entries[j];
- lookup_table->entries[j] = tmp_voltage_lookup_record;
+ swap(lookup_table->entries[j - 1],
+ lookup_table->entries[j]);
}
}
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
index df6ff9252401..9b5e72bdceca 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
@@ -29,7 +29,7 @@
#include "vega20_baco.h"
#include "vega20_smumgr.h"
-
+#include "amdgpu_ras.h"
static const struct soc15_baco_cmd_entry clean_baco_tbl[] =
{
@@ -74,6 +74,7 @@ int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
enum BACO_STATE cur_state;
uint32_t data;
@@ -84,13 +85,19 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
return 0;
if (state == BACO_STATE_IN) {
- data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
- data |= 0x80000000;
- WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
-
-
- if(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0))
- return -EINVAL;
+ if (!ras || !ras->supported) {
+ data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
+ data |= 0x80000000;
+ WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
+
+ if (smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnterBaco, 0))
+ return -EINVAL;
+ } else {
+ if (smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnterBaco, 1))
+ return -EINVAL;
+ }
} else if (state == BACO_STATE_OUT) {
if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index f5915308e643..9295bd90b792 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -183,6 +183,9 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_TablelessHardwareInterface);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_BACO);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnableSMU7ThermalManagement);
if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
@@ -490,8 +493,8 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
"Failed to init sclk threshold!",
return ret);
- if (adev->in_baco_reset) {
- adev->in_baco_reset = 0;
+ if (adev->in_gpu_reset &&
+ (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) {
ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
if (ret)
@@ -4155,6 +4158,24 @@ static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire)
return res;
}
+static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr,
+ enum pp_df_cstate state)
+{
+ int ret;
+
+ /* PPSMC_MSG_DFCstateControl is supported with 40.50 and later fws */
+ if (hwmgr->smu_version < 0x283200) {
+ pr_err("Df cstate control is supported with 40.50 and later SMC fw!\n");
+ return -EINVAL;
+ }
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state);
+ if (ret)
+ pr_err("SetDfCstate failed!\n");
+
+ return ret;
+}
+
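A note on the firmware gate in vega20_set_df_cstate() further down in this series: the SMC version word packs the release numbers into bytes, so the 0x283200 threshold is exactly the "40.50" named in the comment (0x28 = 40, 0x32 = 50). A minimal userspace sketch of that decoding — the byte layout is inferred from the constant, so treat it as an assumption:

/* Decode a packed SMC firmware version (layout inferred from the
 * 0x283200 == "40.50" gate: major in bits 23:16, minor in bits 15:8,
 * patch in bits 7:0). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t smu_version = 0x283200;
	unsigned int major = (smu_version >> 16) & 0xff;	/* 0x28 = 40 */
	unsigned int minor = (smu_version >> 8) & 0xff;		/* 0x32 = 50 */
	unsigned int patch = smu_version & 0xff;		/* 0 */

	printf("SMC fw %u.%u.%u\n", major, minor, patch);	/* 40.50.0 */
	return 0;
}
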
static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
/* init/fini related */
.backend_init = vega20_hwmgr_backend_init,
@@ -4223,6 +4244,7 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
.set_asic_baco_state = vega20_baco_set_state,
.set_mp1_state = vega20_set_mp1_state,
.smu_i2c_bus_access = vega20_smu_i2c_bus_access,
+ .set_df_cstate = vega20_set_df_cstate,
};
int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 23171a4d9a31..49f1caa7b1c5 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -259,7 +259,6 @@ struct smu_table_context
struct smu_bios_boot_up_values boot_values;
void *driver_pptable;
struct smu_table *tables;
- uint32_t table_count;
struct smu_table memory_pool;
uint8_t thermal_controller_type;
uint16_t TDPODLimit;
@@ -322,6 +321,13 @@ struct mclock_latency_table {
struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM];
};
+enum smu_reset_mode
+{
+ SMU_RESET_MODE_0,
+ SMU_RESET_MODE_1,
+ SMU_RESET_MODE_2,
+};
+
enum smu_baco_state
{
SMU_BACO_STATE_ENTER = 0,
@@ -382,6 +388,7 @@ struct smu_context
uint32_t power_profile_mode;
uint32_t default_power_profile_mode;
bool pm_enabled;
+ bool is_apu;
uint32_t smc_if_version;
@@ -398,7 +405,7 @@ struct pptable_funcs {
int (*get_smu_table_index)(struct smu_context *smu, uint32_t index);
int (*get_smu_power_index)(struct smu_context *smu, uint32_t index);
int (*get_workload_type)(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile);
- int (*run_afll_btc)(struct smu_context *smu);
+ int (*run_btc)(struct smu_context *smu);
int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
int (*set_default_dpm_table)(struct smu_context *smu);
@@ -459,7 +466,11 @@ struct pptable_funcs {
int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch);
void (*dump_pptable)(struct smu_context *smu);
int (*get_power_limit)(struct smu_context *smu, uint32_t *limit, bool asic_default);
- int (*get_dpm_uclk_limited)(struct smu_context *smu, uint32_t *clock, bool max);
+ int (*get_dpm_clk_limited)(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t dpm_level, uint32_t *freq);
+ int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
+ int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
+ int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table);
};
struct smu_funcs
@@ -485,7 +496,6 @@ struct smu_funcs
int (*set_min_dcef_deep_sleep)(struct smu_context *smu);
int (*set_tool_table_location)(struct smu_context *smu);
int (*notify_memory_pool_location)(struct smu_context *smu);
- int (*write_watermarks_table)(struct smu_context *smu);
int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
int (*system_features_control)(struct smu_context *smu, bool en);
int (*send_smc_msg)(struct smu_context *smu, uint16_t msg);
@@ -499,6 +509,7 @@ struct smu_funcs
int (*get_current_clk_freq)(struct smu_context *smu, enum smu_clk_type clk_id, uint32_t *value);
int (*init_max_sustainable_clocks)(struct smu_context *smu);
int (*start_thermal_control)(struct smu_context *smu);
+ int (*stop_thermal_control)(struct smu_context *smu);
int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
void *data, uint32_t *size);
int (*set_deep_sleep_dcefclk)(struct smu_context *smu, uint32_t clk);
@@ -522,8 +533,6 @@ struct smu_funcs
int (*get_current_shallow_sleep_clocks)(struct smu_context *smu,
struct smu_clock_info *clocks);
int (*notify_smu_enable_pwe)(struct smu_context *smu);
- int (*set_watermarks_for_clock_ranges)(struct smu_context *smu,
- struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
int (*conv_power_profile_to_pplib_workload)(int power_profile);
uint32_t (*get_fan_control_mode)(struct smu_context *smu);
int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
@@ -538,7 +547,10 @@ struct smu_funcs
enum smu_baco_state (*baco_get_state)(struct smu_context *smu);
int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);
int (*baco_reset)(struct smu_context *smu);
+ int (*mode2_reset)(struct smu_context *smu);
int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
+ int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
+ int (*override_pcie_parameters)(struct smu_context *smu);
};
#define smu_init_microcode(smu) \
@@ -585,9 +597,6 @@ struct smu_funcs
((smu)->funcs->notify_memory_pool_location ? (smu)->funcs->notify_memory_pool_location((smu)) : 0)
#define smu_gfx_off_control(smu, enable) \
((smu)->funcs->gfx_off_control ? (smu)->funcs->gfx_off_control((smu), (enable)) : 0)
-
-#define smu_write_watermarks_table(smu) \
- ((smu)->funcs->write_watermarks_table ? (smu)->funcs->write_watermarks_table((smu)) : 0)
#define smu_set_last_dcef_min_deep_sleep_clk(smu) \
((smu)->funcs->set_last_dcef_min_deep_sleep_clk ? (smu)->funcs->set_last_dcef_min_deep_sleep_clk((smu)) : 0)
#define smu_system_features_control(smu, en) \
@@ -636,8 +645,6 @@ struct smu_funcs
((smu)->funcs->get_current_clk_freq? (smu)->funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)
#define smu_print_clk_levels(smu, clk_type, buf) \
((smu)->ppt_funcs->print_clk_levels ? (smu)->ppt_funcs->print_clk_levels((smu), (clk_type), (buf)) : 0)
-#define smu_force_clk_levels(smu, clk_type, level) \
- ((smu)->ppt_funcs->force_clk_levels ? (smu)->ppt_funcs->force_clk_levels((smu), (clk_type), (level)) : 0)
#define smu_get_od_percentage(smu, type) \
((smu)->ppt_funcs->get_od_percentage ? (smu)->ppt_funcs->get_od_percentage((smu), (type)) : 0)
#define smu_set_od_percentage(smu, type, value) \
@@ -650,6 +657,8 @@ struct smu_funcs
((smu)->ppt_funcs->set_thermal_fan_table ? (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0)
#define smu_start_thermal_control(smu) \
((smu)->funcs->start_thermal_control? (smu)->funcs->start_thermal_control((smu)) : 0)
+#define smu_stop_thermal_control(smu) \
+ ((smu)->funcs->stop_thermal_control? (smu)->funcs->stop_thermal_control((smu)) : 0)
#define smu_read_sensor(smu, sensor, data, size) \
((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : 0)
#define smu_smc_read_sensor(smu, sensor, data, size) \
@@ -697,8 +706,8 @@ struct smu_funcs
((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_power_index? (smu)->ppt_funcs->get_smu_power_index((smu), (src)) : -EINVAL) : -EINVAL)
#define smu_workload_get_type(smu, profile) \
((smu)->ppt_funcs? ((smu)->ppt_funcs->get_workload_type? (smu)->ppt_funcs->get_workload_type((smu), (profile)) : -EINVAL) : -EINVAL)
-#define smu_run_afll_btc(smu) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_afll_btc? (smu)->ppt_funcs->run_afll_btc((smu)) : 0) : 0)
+#define smu_run_btc(smu) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_btc? (smu)->ppt_funcs->run_btc((smu)) : 0) : 0)
#define smu_get_allowed_feature_mask(smu, feature_mask, num) \
((smu)->ppt_funcs? ((smu)->ppt_funcs->get_allowed_feature_mask? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0)
#define smu_set_deep_sleep_dcefclk(smu, clk) \
@@ -727,8 +736,6 @@ struct smu_funcs
((smu)->funcs->get_current_shallow_sleep_clocks ? (smu)->funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0)
#define smu_notify_smu_enable_pwe(smu) \
((smu)->funcs->notify_smu_enable_pwe ? (smu)->funcs->notify_smu_enable_pwe((smu)) : 0)
-#define smu_set_watermarks_for_clock_ranges(smu, clock_ranges) \
- ((smu)->funcs->set_watermarks_for_clock_ranges ? (smu)->funcs->set_watermarks_for_clock_ranges((smu), (clock_ranges)) : 0)
#define smu_dpm_set_uvd_enable(smu, enable) \
((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0)
#define smu_dpm_set_vce_enable(smu, enable) \
@@ -751,21 +758,30 @@ struct smu_funcs
((smu)->funcs->get_dpm_ultimate_freq ? (smu)->funcs->get_dpm_ultimate_freq((smu), (param), (min), (max)) : 0)
#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \
((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0)
-#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
- ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0)
#define smu_baco_is_support(smu) \
((smu)->funcs->baco_is_support? (smu)->funcs->baco_is_support((smu)) : false)
#define smu_baco_get_state(smu, state) \
((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0)
#define smu_baco_reset(smu) \
((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0)
+#define smu_mode2_reset(smu) \
+ ((smu)->funcs->mode2_reset? (smu)->funcs->mode2_reset((smu)) : 0)
#define smu_asic_set_performance_level(smu, level) \
((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL);
#define smu_dump_pptable(smu) \
((smu)->ppt_funcs->dump_pptable ? (smu)->ppt_funcs->dump_pptable((smu)) : 0)
-#define smu_get_dpm_uclk_limited(smu, clock, max) \
- ((smu)->ppt_funcs->get_dpm_uclk_limited ? (smu)->ppt_funcs->get_dpm_uclk_limited((smu), (clock), (max)) : -EINVAL)
+#define smu_get_dpm_clk_limited(smu, clk_type, dpm_level, freq) \
+ ((smu)->ppt_funcs->get_dpm_clk_limited ? (smu)->ppt_funcs->get_dpm_clk_limited((smu), (clk_type), (dpm_level), (freq)) : -EINVAL)
+#define smu_set_soft_freq_limited_range(smu, clk_type, min, max) \
+ ((smu)->funcs->set_soft_freq_limited_range ? (smu)->funcs->set_soft_freq_limited_range((smu), (clk_type), (min), (max)) : -EINVAL)
+#define smu_get_dpm_clock_table(smu, clock_table) \
+ ((smu)->ppt_funcs->get_dpm_clock_table ? (smu)->ppt_funcs->get_dpm_clock_table((smu), (clock_table)) : -EINVAL)
+
+#define smu_override_pcie_parameters(smu) \
+ ((smu)->funcs->override_pcie_parameters ? (smu)->funcs->override_pcie_parameters((smu)) : 0)
+#define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) \
+ ((smu)->ppt_funcs->update_pcie_parameters ? (smu)->ppt_funcs->update_pcie_parameters((smu), (pcie_gen_cap), (pcie_width_cap)) : 0)
extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
uint16_t *size, uint8_t *frev, uint8_t *crev,
@@ -799,6 +815,10 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table);
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size);
int smu_get_power_num_states(struct smu_context *smu, struct pp_states_info *state_info);
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu);
+int smu_write_watermarks_table(struct smu_context *smu);
+int smu_set_watermarks_for_clock_ranges(
+ struct smu_context *smu,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
/* smu to display interface */
extern int smu_display_configuration_change(struct smu_context *smu, const
@@ -828,10 +848,16 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
int smu_set_display_count(struct smu_context *smu, uint32_t count);
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
-int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled);
const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type);
const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature);
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf);
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask);
+int smu_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t mask);
+int smu_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state);
+int smu_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
index 78e5927b7711..e3291259b249 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
@@ -95,8 +95,7 @@
//BTC
#define PPSMC_MSG_RunAfllBtc 0x30
-#define PPSMC_MSG_RunGfxDcBtc 0x31
-#define PPSMC_MSG_RunSocDcBtc 0x32
+#define PPSMC_MSG_RunDcBtc 0x31
//Debug
#define PPSMC_MSG_DramLogSetDramAddrHigh 0x33
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 7bf9a14bfa0b..bd8c922dfd3e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -355,6 +355,7 @@ struct pp_hwmgr_func {
int (*set_mp1_state)(struct pp_hwmgr *hwmgr, enum pp_mp1_state mp1_state);
int (*asic_reset)(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode);
int (*smu_i2c_bus_access)(struct pp_hwmgr *hwmgr, bool acquire);
+ int (*set_df_cstate)(struct pp_hwmgr *hwmgr, enum pp_df_cstate state);
};
struct pp_table_func {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
index e02950b505fa..2248d682c462 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
@@ -137,23 +137,23 @@
#define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT )
#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT )
#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT )
-#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT )
+#define FEATURE_DS_UCLK_MASK (1 << FEATURE_DS_UCLK_BIT )
#define FEATURE_GFX_ULV_MASK (1 << FEATURE_GFX_ULV_BIT )
-#define FEATURE_VCN_PG_MASK (1 << FEATURE_VCN_PG_BIT )
+#define FEATURE_DPM_VCN_MASK (1 << FEATURE_DPM_VCN_BIT )
#define FEATURE_RSMU_SMN_CG_MASK (1 << FEATURE_RSMU_SMN_CG_BIT )
#define FEATURE_WAFL_CG_MASK (1 << FEATURE_WAFL_CG_BIT )
#define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT )
#define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT )
-#define FEATURE_APCC_MASK (1 << FEATURE_APCC_BIT )
+#define FEATURE_APCC_PLUS_MASK (1 << FEATURE_APCC_PLUS_BIT )
#define FEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT )
#define FEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT )
#define FEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT )
#define FEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT )
#define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT )
-#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << EATURE_OUT_OF_BAND_MONITOR_BIT )
-#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_MASK )
+#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << FEATURE_OUT_OF_BAND_MONITOR_BIT )
+#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_BIT )
//FIXME need updating
@@ -696,7 +696,11 @@ typedef struct {
uint8_t GpioI2cSda; // Serial Data
uint16_t GpioPadding;
- uint32_t BoardReserved[9];
+ // Platform input telemetry voltage coefficient
+ uint32_t BoardVoltageCoeffA; // decode by /1000
+ uint32_t BoardVoltageCoeffB; // decode by /1000
+
+ uint32_t BoardReserved[7];
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8]; // SMU internal use
@@ -802,7 +806,7 @@ typedef struct {
uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; // in 10KHz units
- uint32_t EnabledAvfsModules[2];
+ uint32_t EnabledAvfsModules[3];
uint32_t MmHubPadding[8]; // SMU internal use
} AvfsFuseOverride_t;
@@ -865,7 +869,8 @@ typedef struct {
//#define TABLE_ACTIVITY_MONITOR_COEFF 7
#define TABLE_OVERDRIVE 7
#define TABLE_WAFL_XGMI_TOPOLOGY 8
-#define TABLE_COUNT 9
+#define TABLE_I2C_COMMANDS 9
+#define TABLE_COUNT 10
// These defines are used with the SMC_MSG_SetUclkFastSwitch message.
typedef enum {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
index b0dd05d431dd..d8c9b7f91fcc 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
@@ -114,6 +114,7 @@
__SMU_DUMMY_MAP(PowerDownJpeg), \
__SMU_DUMMY_MAP(BacoAudioD3PME), \
__SMU_DUMMY_MAP(ArmD3), \
+ __SMU_DUMMY_MAP(RunDcBtc), \
__SMU_DUMMY_MAP(RunGfxDcBtc), \
__SMU_DUMMY_MAP(RunSocDcBtc), \
__SMU_DUMMY_MAP(SetMemoryChannelEnable), \
@@ -168,6 +169,7 @@
__SMU_DUMMY_MAP(PowerGateAtHub), \
__SMU_DUMMY_MAP(SetSoftMinJpeg), \
__SMU_DUMMY_MAP(SetHardMinFclkByFreq), \
+ __SMU_DUMMY_MAP(DFCstateControl), \
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -251,6 +253,7 @@ enum smu_clk_type {
__SMU_DUMMY_MAP(TEMP_DEPENDENT_VMIN), \
__SMU_DUMMY_MAP(MMHUB_PG), \
__SMU_DUMMY_MAP(ATHUB_PG), \
+ __SMU_DUMMY_MAP(APCC_DFLL), \
__SMU_DUMMY_MAP(WAFL_CG),
#undef __SMU_DUMMY_MAP
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 5bda8539447a..9b856a6c5dfb 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -27,7 +27,7 @@
#define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU11_DRIVER_IF_VERSION_VG20 0x13
-#define SMU11_DRIVER_IF_VERSION_ARCT 0x09
+#define SMU11_DRIVER_IF_VERSION_ARCT 0x0D
#define SMU11_DRIVER_IF_VERSION_NV10 0x33
#define SMU11_DRIVER_IF_VERSION_NV14 0x34
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
index a0883038f3c3..0c66f0fe1aaf 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
@@ -120,7 +120,8 @@
#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x5D
#define PPSMC_MSG_GetAVFSVoltageByDpm 0x5F
#define PPSMC_MSG_BacoWorkAroundFlushVDCI 0x60
-#define PPSMC_Message_Count 0x61
+#define PPSMC_MSG_DFCstateControl 0x63
+#define PPSMC_Message_Count 0x64
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 0b461404af6b..d59ec114f824 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -177,6 +177,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUN
FEA_MAP(TEMP_DEPENDENT_VMIN),
FEA_MAP(MMHUB_PG),
FEA_MAP(ATHUB_PG),
+ FEA_MAP(APCC_DFLL),
};
static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = {
@@ -327,40 +328,52 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
memset(feature_mask, 0, sizeof(uint32_t) * num);
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT)
- | FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)
- | FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)
| FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)
- | FEATURE_MASK(FEATURE_DPM_LINK_BIT)
- | FEATURE_MASK(FEATURE_GFX_ULV_BIT)
| FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT)
| FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
| FEATURE_MASK(FEATURE_PPT_BIT)
| FEATURE_MASK(FEATURE_TDC_BIT)
| FEATURE_MASK(FEATURE_GFX_EDC_BIT)
+ | FEATURE_MASK(FEATURE_APCC_PLUS_BIT)
| FEATURE_MASK(FEATURE_VR0HOT_BIT)
| FEATURE_MASK(FEATURE_FAN_CONTROL_BIT)
| FEATURE_MASK(FEATURE_THERMAL_BIT)
| FEATURE_MASK(FEATURE_LED_DISPLAY_BIT)
- | FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT)
- | FEATURE_MASK(FEATURE_DS_GFXCLK_BIT)
+ | FEATURE_MASK(FEATURE_DS_LCLK_BIT)
| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
| FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
| FEATURE_MASK(FEATURE_BACO_BIT)
| FEATURE_MASK(FEATURE_ACDC_BIT)
| FEATURE_MASK(FEATURE_GFX_SS_BIT)
| FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
- | FEATURE_MASK(FEATURE_FW_CTF_BIT);
+ | FEATURE_MASK(FEATURE_FW_CTF_BIT)
+ | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
+
+ if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
+
+ if (adev->pm.pp_feature & PP_SCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
+
+ if (adev->pm.pp_feature & PP_PCIE_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
+
+ if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT);
if (adev->pm.pp_feature & PP_MCLK_DPM_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
| FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
| FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);
- if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
+ if (adev->pm.pp_feature & PP_ULV_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+
+ if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
+
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
- /* TODO: remove it once fw fix the bug */
- *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
- }
if (smu->adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT);
@@ -1451,18 +1464,47 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
uint32_t sclk_freq = 0, uclk_freq = 0;
uint32_t uclk_level = 0;
- switch (adev->pdev->revision) {
- case 0xf0: /* XTX */
- case 0xc0:
- sclk_freq = NAVI10_PEAK_SCLK_XTX;
- break;
- case 0xf1: /* XT */
- case 0xc1:
- sclk_freq = NAVI10_PEAK_SCLK_XT;
+ switch (adev->asic_type) {
+ case CHIP_NAVI10:
+ switch (adev->pdev->revision) {
+ case 0xf0: /* XTX */
+ case 0xc0:
+ sclk_freq = NAVI10_PEAK_SCLK_XTX;
+ break;
+ case 0xf1: /* XT */
+ case 0xc1:
+ sclk_freq = NAVI10_PEAK_SCLK_XT;
+ break;
+ default: /* XL */
+ sclk_freq = NAVI10_PEAK_SCLK_XL;
+ break;
+ }
break;
- default: /* XL */
- sclk_freq = NAVI10_PEAK_SCLK_XL;
+ case CHIP_NAVI14:
+ switch (adev->pdev->revision) {
+ case 0xc7: /* XT */
+ case 0xf4:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK;
+ break;
+ case 0xc1: /* XTM */
+ case 0xf2:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK;
+ break;
+ case 0xc3: /* XLM */
+ case 0xf3:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
+ break;
+ case 0xc5: /* XTX */
+ case 0xf6:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK;
+ break;
+ default: /* XL */
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK;
+ break;
+ }
break;
+ default:
+ return -EINVAL;
}
ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level);
@@ -1485,10 +1527,6 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
int ret = 0;
- struct amdgpu_device *adev = smu->adev;
-
- if (adev->asic_type != CHIP_NAVI10)
- return -EINVAL;
switch (level) {
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
@@ -1591,6 +1629,28 @@ static int navi10_get_power_limit(struct smu_context *smu,
return 0;
}
+static int navi10_update_pcie_parameters(struct smu_context *smu,
+ uint32_t pcie_gen_cap,
+ uint32_t pcie_width_cap)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ int ret, i;
+ uint32_t smu_pcie_arg;
+
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ smu_pcie_arg = (i << 16) |
+ ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
+ (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
+ pptable->PcieLaneCount[i] : pcie_width_cap);
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
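To make the smu_pcie_arg packing in navi10_update_pcie_parameters() concrete, here is a standalone sketch with invented pptable values: link level in bits 31:16, gen code (clamped to the platform cap) in bits 15:8, and lane-width code (likewise clamped) in bits 7:0.

/* Standalone sketch of the smu_pcie_arg packing used above; the
 * pptable values are made up for illustration. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_pcie_arg(uint32_t level, uint32_t gen, uint32_t width,
			      uint32_t gen_cap, uint32_t width_cap)
{
	return (level << 16) |
	       ((gen <= gen_cap ? gen : gen_cap) << 8) |
	       (width <= width_cap ? width : width_cap);
}

int main(void)
{
	/* level 1 asks for gen code 3 (Gen4) at width code 6 (x16);
	 * the platform caps it at gen code 2 (Gen3). */
	printf("0x%08x\n", pack_pcie_arg(1, 3, 6, 2, 6));	/* 0x00010206 */
	return 0;
}
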
static const struct pptable_funcs navi10_ppt_funcs = {
.tables_init = navi10_tables_init,
.alloc_dpm_context = navi10_allocate_dpm_context,
@@ -1629,12 +1689,10 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_thermal_temperature_range = navi10_get_thermal_temperature_range,
.display_disable_memory_clock_switch = navi10_display_disable_memory_clock_switch,
.get_power_limit = navi10_get_power_limit,
+ .update_pcie_parameters = navi10_update_pcie_parameters,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &navi10_ppt_funcs;
- smu_table->table_count = TABLE_COUNT;
}
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
index 620ff17c2fef..a37e37c5f105 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
@@ -27,6 +27,12 @@
#define NAVI10_PEAK_SCLK_XT (1755)
#define NAVI10_PEAK_SCLK_XL (1625)
+#define NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK (1670)
+#define NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK (1448)
+#define NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK (1181)
+#define NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK (1717)
+#define NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK (1448)
+
extern void navi10_set_ppt_funcs(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index e62bfba51562..57930c9e22ff 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -160,21 +160,17 @@ static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
* This interface is only for getting the uclk ultimate freq; it shouldn't
* introduce other similar functions, which would result in too many callbacks.
*/
-static int renoir_get_dpm_uclk_limited(struct smu_context *smu, uint32_t *clock, bool max)
+static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t dpm_level, uint32_t *freq)
{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
- DpmClocks_t *table = smu->smu_table.clocks_table;
-
- if (!clock || !table)
+ if (!clk_table || clk_type >= SMU_CLK_COUNT)
return -EINVAL;
- if (max)
- *clock = table->FClocks[NUM_FCLK_DPM_LEVELS-1].Freq;
- else
- *clock = table->FClocks[0].Freq;
+ GET_DPM_CUR_FREQ(clk_table, clk_type, dpm_level, *freq);
return 0;
-
}
static int renoir_print_clk_levels(struct smu_context *smu,
@@ -246,20 +242,461 @@ static int renoir_print_clk_levels(struct smu_context *smu,
return size;
}
+static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context *smu)
+{
+ enum amd_pm_state_type pm_type;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!smu_dpm_ctx->dpm_context ||
+ !smu_dpm_ctx->dpm_current_power_state)
+ return -EINVAL;
+
+ mutex_lock(&(smu->mutex));
+ switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) {
+ case SMU_STATE_UI_LABEL_BATTERY:
+ pm_type = POWER_STATE_TYPE_BATTERY;
+ break;
+ case SMU_STATE_UI_LABEL_BALLANCED:
+ pm_type = POWER_STATE_TYPE_BALANCED;
+ break;
+ case SMU_STATE_UI_LABEL_PERFORMANCE:
+ pm_type = POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ default:
+ if (smu_dpm_ctx->dpm_current_power_state->classification.flags & SMU_STATE_CLASSIFICATION_FLAG_BOOT)
+ pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
+ else
+ pm_type = POWER_STATE_TYPE_DEFAULT;
+ break;
+ }
+ mutex_unlock(&(smu->mutex));
+
+ return pm_type;
+}
+
+static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (enable) {
+ /* vcn dpm on is a prerequisite for vcn power gate messages */
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
+ if (ret)
+ return ret;
+ }
+ power_gate->vcn_gated = false;
+ } else {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ if (ret)
+ return ret;
+ }
+ power_gate->vcn_gated = true;
+ }
+
+ return ret;
+}
+
+static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
+{
+ int ret = 0, i = 0;
+ uint32_t min_freq, max_freq, force_freq;
+ enum smu_clk_type clk_type;
+
+ enum smu_clk_type clks[] = {
+ SMU_GFXCLK,
+ SMU_MCLK,
+ SMU_SOCCLK,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ clk_type = clks[i];
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
+ if (ret)
+ return ret;
+
+ force_freq = highest ? max_freq : min_freq;
+ ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int renoir_unforce_dpm_levels(struct smu_context *smu)
+{
+ int ret = 0, i = 0;
+ uint32_t min_freq, max_freq;
+ enum smu_clk_type clk_type;
+
+ struct clk_feature_map {
+ enum smu_clk_type clk_type;
+ uint32_t feature;
+ } clk_feature_map[] = {
+ {SMU_GFXCLK, SMU_FEATURE_DPM_GFXCLK_BIT},
+ {SMU_MCLK, SMU_FEATURE_DPM_UCLK_BIT},
+ {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
+ if (!smu_feature_is_enabled(smu, clk_feature_map[i].feature))
+ continue;
+
+ clk_type = clk_feature_map[i].clk_type;
+
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
+ if (ret)
+ return ret;
+
+ ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int renoir_get_workload_type(struct smu_context *smu, uint32_t profile)
+{
+ uint32_t pplib_workload = 0;
+
+ switch (profile) {
+ case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
+ pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_CUSTOM:
+ pplib_workload = WORKLOAD_PPLIB_COUNT;
+ break;
+ case PP_SMC_POWER_PROFILE_VIDEO:
+ pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_VR:
+ pplib_workload = WORKLOAD_PPLIB_VR_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_COMPUTE:
+ pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return pplib_workload;
+}
+
+static int renoir_get_profiling_clk_mask(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ uint32_t *sclk_mask,
+ uint32_t *mclk_mask,
+ uint32_t *soc_mask)
+{
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ if (sclk_mask)
+ *sclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ if (mclk_mask)
+ *mclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ if (sclk_mask)
+ /* sclk here is gfxclk, which only exposes three levels (min/current/max) on APUs */
+ *sclk_mask = 3 - 1;
+
+ if (mclk_mask)
+ *mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1;
+
+ if (soc_mask)
+ *soc_mask = NUM_SOCCLK_DPM_LEVELS - 1;
+ }
+
+ return 0;
+}
+
+/*
+ * This interface gets the dpm clock table for dc
+ */
+static int renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+ DpmClocks_t *table = smu->smu_table.clocks_table;
+ int i;
+
+ if (!clock_table || !table)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++) {
+ clock_table->DcfClocks[i].Freq = table->DcfClocks[i].Freq;
+ clock_table->DcfClocks[i].Vol = table->DcfClocks[i].Vol;
+ }
+
+ for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
+ clock_table->SocClocks[i].Freq = table->SocClocks[i].Freq;
+ clock_table->SocClocks[i].Vol = table->SocClocks[i].Vol;
+ }
+
+ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+ clock_table->FClocks[i].Freq = table->FClocks[i].Freq;
+ clock_table->FClocks[i].Vol = table->FClocks[i].Vol;
+ }
+
+ for (i = 0; i < NUM_MEMCLK_DPM_LEVELS; i++) {
+ clock_table->MemClocks[i].Freq = table->MemClocks[i].Freq;
+ clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol;
+ }
+
+ return 0;
+}
+
+static int renoir_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, uint32_t mask)
+{
+ int ret = 0;
+ uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ if (soft_min_level > 2 || soft_max_level > 2) {
+ pr_info("Currently sclk only support 3 levels on APU\n");
+ return -EINVAL;
+ }
+
+ ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_freq, &max_freq);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ soft_max_level == 0 ? min_freq :
+ soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ soft_min_level == 2 ? max_freq :
+ soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq);
+ if (ret)
+ return ret;
+ break;
+ case SMU_SOCCLK:
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq);
+ if (ret)
+ return ret;
+ break;
+ case SMU_MCLK:
+ case SMU_FCLK:
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq);
+ if (ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
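A worked example for the soft level derivation at the top of renoir_force_clk_levels(): ffs() returns the 1-based index of the lowest set bit and fls() of the highest, so a mask of 0x6 (levels 1 and 2 requested) gives soft_min_level = 1 and soft_max_level = 2. A userspace sketch, with a portable stand-in for the kernel-only fls():

/* Sketch of the soft min/max level derivation above. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

static int fls_equiv(unsigned int x)	/* userspace stand-in for fls() */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int mask = 0x6;	/* levels 1 and 2 requested */
	unsigned int min_lvl = mask ? (ffs(mask) - 1) : 0;
	unsigned int max_lvl = mask ? (fls_equiv(mask) - 1) : 0;

	printf("min=%u max=%u\n", min_lvl, max_lvl);	/* min=1 max=2 */
	return 0;
}
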
+static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+{
+ int workload_type, ret;
+ uint32_t profile_mode = input[size];
+
+ if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
+ return -EINVAL;
+ }
+
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
+ if (workload_type < 0) {
+ pr_err("Unsupported power profile mode %d on RENOIR\n",smu->power_profile_mode);
+ return -EINVAL;
+ }
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+ 1 << workload_type);
+ if (ret) {
+ pr_err("Fail to set workload type %d\n", workload_type);
+ return ret;
+ }
+
+ smu->power_profile_mode = profile_mode;
+
+ return 0;
+}
+
+static int renoir_set_peak_clock_by_device(struct smu_context *smu)
+{
+ int ret = 0;
+ uint32_t sclk_freq = 0, uclk_freq = 0;
+
+ ret = smu_get_dpm_freq_range(smu, SMU_SCLK, NULL, &sclk_freq);
+ if (ret)
+ return ret;
+
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ if (ret)
+ return ret;
+
+ ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &uclk_freq);
+ if (ret)
+ return ret;
+
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int renoir_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = renoir_set_peak_clock_by_device(smu);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Save watermark settings into the pplib smu structure and
+ * also pass the data on to the smu controller.
+ */
+static int renoir_set_watermarks_table(
+ struct smu_context *smu,
+ void *watermarks,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
+{
+ int i;
+ int ret = 0;
+ Watermarks_t *table = watermarks;
+
+ if (!table || !clock_ranges)
+ return -EINVAL;
+
+ if (clock_ranges->num_wm_dmif_sets > 4 ||
+ clock_ranges->num_wm_mcif_sets > 4)
+ return -EINVAL;
+
+ /* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr*/
+ for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
+ table->WatermarkRow[WM_DCFCLK][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MinMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].WmSetting = (uint8_t)
+ clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+ }
+
+ for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
+ table->WatermarkRow[WM_SOCCLK][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MinMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
+ clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+ }
+
+ /* pass data to smu controller */
+ ret = smu_write_watermarks_table(smu);
+
+ return ret;
+}
+
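One hazard worth flagging in renoir_set_watermarks_table() above: the WatermarkRow fields are 16-bit, so the *_in_khz inputs only survive the cast if the callers actually pass MHz-scale values despite the field names — whether they do isn't visible in this patch. Anything above 65535 truncates silently, as this sketch shows (cpu_to_le16() is modeled as a plain cast, which is what it is on little-endian hosts):

/* Demonstrates the 16-bit truncation in the watermark conversion. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wm_min_dcfclk_clk_in_khz = 300000;	/* 300 MHz in kHz */
	uint16_t stored = (uint16_t)wm_min_dcfclk_clk_in_khz;

	printf("stored = %u\n", stored);	/* 37856, not 300000 */
	return 0;
}
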
+static int renoir_get_power_profile_mode(struct smu_context *smu,
+ char *buf)
+{
+ static const char *profile_name[] = {
+ "BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE",
+ "CUSTOM"};
+ uint32_t i, size = 0;
+ int16_t workload_type = 0;
+
+ if (!smu->pm_enabled || !buf)
+ return -EINVAL;
+
+ for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ /*
+ * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
+ * Not all profile modes are supported on Renoir.
+ */
+ workload_type = smu_workload_get_type(smu, i);
+ if (workload_type < 0)
+ continue;
+
+ size += sprintf(buf + size, "%2d %14s%s\n",
+ i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ }
+
+ return size;
+}
+
static const struct pptable_funcs renoir_ppt_funcs = {
.get_smu_msg_index = renoir_get_smu_msg_index,
.get_smu_table_index = renoir_get_smu_table_index,
.tables_init = renoir_tables_init,
.set_power_state = NULL,
- .get_dpm_uclk_limited = renoir_get_dpm_uclk_limited,
+ .get_dpm_clk_limited = renoir_get_dpm_clk_limited,
.print_clk_levels = renoir_print_clk_levels,
+ .get_current_power_state = renoir_get_current_power_state,
+ .dpm_set_uvd_enable = renoir_dpm_set_uvd_enable,
+ .force_dpm_limit_value = renoir_force_dpm_limit_value,
+ .unforce_dpm_levels = renoir_unforce_dpm_levels,
+ .get_workload_type = renoir_get_workload_type,
+ .get_profiling_clk_mask = renoir_get_profiling_clk_mask,
+ .force_clk_levels = renoir_force_clk_levels,
+ .set_power_profile_mode = renoir_set_power_profile_mode,
+ .set_performance_level = renoir_set_performance_level,
+ .get_dpm_clock_table = renoir_get_dpm_clock_table,
+ .set_watermarks_table = renoir_set_watermarks_table,
+ .get_power_profile_mode = renoir_get_power_profile_mode,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &renoir_ppt_funcs;
smu->smc_if_version = SMU12_DRIVER_IF_VERSION;
- smu_table->table_count = TABLE_COUNT;
+ smu->is_apu = true;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index c5257ae3188a..96200011f9bc 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -35,6 +35,7 @@
#include "vega20_ppt.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
+#include "amd_pcie.h"
#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
@@ -439,7 +440,7 @@ static int smu_v11_0_init_smc_tables(struct smu_context *smu)
struct smu_table *tables = NULL;
int ret = 0;
- if (smu_table->tables || smu_table->table_count == 0)
+ if (smu_table->tables)
return -EINVAL;
tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
@@ -465,13 +466,12 @@ static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
struct smu_table_context *smu_table = &smu->smu_table;
int ret = 0;
- if (!smu_table->tables || smu_table->table_count == 0)
+ if (!smu_table->tables)
return -EINVAL;
kfree(smu_table->tables);
kfree(smu_table->metrics_table);
smu_table->tables = NULL;
- smu_table->table_count = 0;
smu_table->metrics_table = NULL;
smu_table->metrics_time = 0;
@@ -771,23 +771,6 @@ static int smu_v11_0_write_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
-{
- int ret = 0;
- struct smu_table_context *smu_table = &smu->smu_table;
- struct smu_table *table = NULL;
-
- table = &smu_table->tables[SMU_TABLE_WATERMARKS];
-
- if (!table->cpu_addr)
- return -EINVAL;
-
- ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
- true);
-
- return ret;
-}
-
static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
int ret;
@@ -1212,6 +1195,15 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
return ret;
}
+static int smu_v11_0_stop_thermal_control(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);
+
+ return 0;
+}
+
static uint16_t convert_to_vddc(uint8_t vid)
{
return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
@@ -1328,26 +1320,6 @@ failed:
return ret;
}
-static int
-smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct
- dm_pp_wm_sets_with_clock_ranges_soc15
- *clock_ranges)
-{
- int ret = 0;
- struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
- void *table = watermarks->cpu_addr;
-
- if (!smu->disable_watermark &&
- smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
- smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
- smu_set_watermarks_table(smu, table, clock_ranges);
- smu->watermarks_bitmap |= WATERMARKS_EXIST;
- smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
- }
-
- return ret;
-}
-
static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
{
int ret = 0;
@@ -1755,6 +1727,77 @@ failed:
return ret;
}
+static int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ if (max > 0) {
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
+ param);
+ if (ret)
+ return ret;
+ }
+
+ if (min > 0) {
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
+ param);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
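The message parameter in smu_v11_0_set_soft_freq_limited_range() uses the same halfword convention: clock id in the high 16 bits, frequency (MHz, judging by the 16-bit field) in the low 16. A sketch with a hypothetical clock id — the real index comes from smu_clk_get_index():

/* Sketch of the (clk_id << 16) | freq packing above; the clock id
 * is a placeholder, not a real ASIC index. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t clk_id = 2;		/* hypothetical GFXCLK index */
	uint32_t max_mhz = 1500;
	uint32_t param = (clk_id << 16) | (max_mhz & 0xffff);

	printf("SetSoftMaxByFreq param = 0x%08x\n", param);	/* 0x000205dc */
	return 0;
}
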
+static int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t pcie_gen = 0, pcie_width = 0;
+ int ret;
+
+ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
+ pcie_gen = 3;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+ pcie_gen = 2;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
+ pcie_gen = 1;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
+ pcie_gen = 0;
+
+ /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
+ * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
+ * Bit 7:0: PCIE lane width, where 1 to 7 corresponds to x1 to x32
+ */
+ if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+ pcie_width = 6;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+ pcie_width = 5;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+ pcie_width = 4;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+ pcie_width = 3;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+ pcie_width = 2;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+ pcie_width = 1;
+
+ ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
+ if (ret)
+ pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
+
+ return ret;
+}
+
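The if/else ladders in smu_v11_0_override_pcie_parameters() reduce the pcie_gen_mask / pcie_mlw_mask capability bitmasks to the single highest supported code. A sketch of that reduction with placeholder bit definitions — the real CAIL_* masks live in amd_pcie.h and may use different bit positions:

/* Reduce a capability bitmask to its highest supported code.
 * The GEN* bits below are placeholders, not the real CAIL_* values. */
#include <stdint.h>
#include <stdio.h>

#define GEN1	(1u << 0)
#define GEN2	(1u << 1)
#define GEN3	(1u << 2)
#define GEN4	(1u << 3)

static uint32_t highest_gen_code(uint32_t mask)
{
	if (mask & GEN4)
		return 3;
	if (mask & GEN3)
		return 2;
	if (mask & GEN2)
		return 1;
	return 0;
}

int main(void)
{
	uint32_t mask = GEN1 | GEN2 | GEN3;	/* Gen3-capable link */

	printf("pcie_gen code = %u\n", highest_gen_code(mask));	/* 2 */
	return 0;
}
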
static const struct smu_funcs smu_v11_0_funcs = {
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
@@ -1775,7 +1818,6 @@ static const struct smu_funcs smu_v11_0_funcs = {
.parse_pptable = smu_v11_0_parse_pptable,
.populate_smc_tables = smu_v11_0_populate_smc_pptable,
.write_pptable = smu_v11_0_write_pptable,
- .write_watermarks_table = smu_v11_0_write_watermarks_table,
.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.init_display_count = smu_v11_0_init_display_count,
@@ -1787,10 +1829,10 @@ static const struct smu_funcs smu_v11_0_funcs = {
.get_current_clk_freq = smu_v11_0_get_current_clk_freq,
.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
.start_thermal_control = smu_v11_0_start_thermal_control,
+ .stop_thermal_control = smu_v11_0_stop_thermal_control,
.read_sensor = smu_v11_0_read_sensor,
.set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
- .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
@@ -1805,6 +1847,8 @@ static const struct smu_funcs smu_v11_0_funcs = {
.baco_set_state = smu_v11_0_baco_set_state,
.baco_reset = smu_v11_0_baco_reset,
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
};
void smu_v11_0_set_smu_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 9d2280ca1f4b..cac4269cf1d1 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -244,15 +244,6 @@ static int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
if (enable) {
ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
- /* confirm gfx is back to "off" state, timeout is 5 seconds */
- while (!(smu_v12_0_get_gfxoff_status(smu) == 0)) {
- msleep(10);
- timeout--;
- if (timeout == 0) {
- DRM_ERROR("enable gfxoff timeout and failed!\n");
- break;
- }
- }
} else {
ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
@@ -275,7 +266,7 @@ static int smu_v12_0_init_smc_tables(struct smu_context *smu)
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = NULL;
- if (smu_table->tables || smu_table->table_count == 0)
+ if (smu_table->tables)
return -EINVAL;
tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
@@ -292,7 +283,7 @@ static int smu_v12_0_fini_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- if (!smu_table->tables || smu_table->table_count == 0)
+ if (!smu_table->tables)
return -EINVAL;
kfree(smu_table->clocks_table);
@@ -323,10 +314,18 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
uint32_t *min, uint32_t *max)
{
int ret = 0;
+ uint32_t mclk_mask, soc_mask;
mutex_lock(&smu->mutex);
if (max) {
+ ret = smu_get_profiling_clk_mask(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
+ NULL,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ goto failed;
+
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
@@ -340,14 +339,20 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
goto failed;
break;
case SMU_UCLK:
- ret = smu_get_dpm_uclk_limited(smu, max, true);
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
if (ret)
goto failed;
break;
default:
ret = -EINVAL;
goto failed;
-
}
}
@@ -365,7 +370,14 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
goto failed;
break;
case SMU_UCLK:
- ret = smu_get_dpm_uclk_limited(smu, min, false);
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
if (ret)
goto failed;
break;
@@ -373,13 +385,70 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
ret = -EINVAL;
goto failed;
}
-
}
failed:
mutex_unlock(&smu->mutex);
return ret;
}
+static int smu_v12_0_mode2_reset(struct smu_context *smu)
+{
+ return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2);
+}
+
+static int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max)
+{
+ int ret = 0;
+
+ if (max < min)
+ return -EINVAL;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max);
+ if (ret)
+ return ret;
+ break;
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max);
+ if (ret)
+ return ret;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max);
+ if (ret)
+ return ret;
+ break;
+ case SMU_VCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
static const struct smu_funcs smu_v12_0_funcs = {
.check_fw_status = smu_v12_0_check_fw_status,
.check_fw_version = smu_v12_0_check_fw_version,
@@ -394,6 +463,8 @@ static const struct smu_funcs smu_v12_0_funcs = {
.fini_smc_tables = smu_v12_0_fini_smc_tables,
.populate_smc_tables = smu_v12_0_populate_smc_tables,
.get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
+ .mode2_reset = smu_v12_0_mode2_reset,
+ .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
};
void smu_v12_0_set_smu_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index dc754447f0dd..23c12018dbc1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -655,7 +655,7 @@ static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
count = SMU_MAX_SMIO_LEVELS;
for (level = 0; level < count; level++) {
table->SmioTable2.Pattern[level].Voltage =
- PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
table->SmioTable2.Pattern[level].Smio =
(uint8_t) level;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index 3f12cf341511..aa0ee2b46135 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -137,7 +137,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
priv->smu_tables.entry[table_id].table_id);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
index 4728aa23a818..7dca04a89217 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -177,12 +177,10 @@ static int smu8_load_mec_firmware(struct pp_hwmgr *hwmgr)
uint32_t tmp;
int ret = 0;
struct cgs_firmware_info info = {0};
- struct smu8_smumgr *smu8_smu;
if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- smu8_smu = hwmgr->smu_backend;
ret = cgs_get_firmware_info(hwmgr->device,
CGS_UCODE_ID_CP_MEC, &info);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 0dbdde69f2d9..0f3836fd9666 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -58,7 +58,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
priv->smu_tables.entry[table_id].table_id);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index f9589806bf83..90c782c132d2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -66,7 +66,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
return -EINVAL);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index b9089c6bea85..f604612f411f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -189,7 +189,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
return ret);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -290,7 +290,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
return ret);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index 7c960b07746f..ae18fbcb26fb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -456,7 +456,7 @@ static int vegam_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
count = SMU_MAX_SMIO_LEVELS;
for (level = 0; level < count; level++) {
table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US(
- data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
table->SmioTable2.Pattern[level].Smio =
(uint8_t) level;
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index bbd8ebd58434..bebf38ca4be7 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -143,6 +143,7 @@ static struct smu_11_0_cmn2aisc_mapping vega20_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(PrepareMp1ForShutdown),
MSG_MAP(SetMGpuFanBoostLimitRpm),
MSG_MAP(GetAVFSVoltageByDpm),
+ MSG_MAP(DFCstateControl),
};
static struct smu_11_0_cmn2aisc_mapping vega20_clk_map[SMU_CLK_COUNT] = {
@@ -1274,14 +1275,8 @@ static int vega20_force_clk_levels(struct smu_context *smu,
struct vega20_dpm_table *dpm_table;
struct vega20_single_dpm_table *single_dpm_table;
uint32_t soft_min_level, soft_max_level, hard_min_level;
- struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
int ret = 0;
- if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
- pr_info("force clock level is for dpm manual mode only.\n");
- return -EINVAL;
- }
-
mutex_lock(&(smu->mutex));
soft_min_level = mask ? (ffs(mask) - 1) : 0;
@@ -3141,6 +3136,49 @@ static int vega20_get_thermal_temperature_range(struct smu_context *smu,
return 0;
}
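+/*
+ * Ask the SMC to enter/exit the given Data Fabric C-state; only
+ * supported from SMC firmware 40.50 onwards.
+ */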
+static int vega20_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ uint32_t smu_version;
+ int ret;
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+	/* PPSMC_MSG_DFCstateControl is supported with SMC firmware 40.50 and later */
+ if (smu_version < 0x283200) {
+		pr_err("DF cstate control is only supported with SMC firmware 40.50 and later!\n");
+ return -EINVAL;
+ }
+
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state);
+}
+
+static int vega20_update_pcie_parameters(struct smu_context *smu,
+ uint32_t pcie_gen_cap,
+ uint32_t pcie_width_cap)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ int ret, i;
+ uint32_t smu_pcie_arg;
+
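+
+	/*
+	 * Each override word packs the link level in bits [31:16], the
+	 * PCIe gen (capped to pcie_gen_cap) in bits [15:8] and the lane
+	 * count (capped to pcie_width_cap) in bits [7:0].
+	 */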
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ smu_pcie_arg = (i << 16) |
+ ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
+ (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
+ pptable->PcieLaneCount[i] : pcie_width_cap);
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg);
+ }
+
+ return ret;
+}
+
+
static const struct pptable_funcs vega20_ppt_funcs = {
.tables_init = vega20_tables_init,
.alloc_dpm_context = vega20_allocate_dpm_context,
@@ -3153,7 +3191,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.get_smu_table_index = vega20_get_smu_table_index,
.get_smu_power_index = vega20_get_pwr_src_index,
.get_workload_type = vega20_get_workload_type,
- .run_afll_btc = vega20_run_btc_afll,
+ .run_btc = vega20_run_btc_afll,
.get_allowed_feature_mask = vega20_get_allowed_feature_mask,
.get_current_power_state = vega20_get_current_power_state,
.set_default_dpm_table = vega20_set_default_dpm_table,
@@ -3183,13 +3221,12 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.get_fan_speed_percent = vega20_get_fan_speed_percent,
.get_fan_speed_rpm = vega20_get_fan_speed_rpm,
.set_watermarks_table = vega20_set_watermarks_table,
- .get_thermal_temperature_range = vega20_get_thermal_temperature_range
+ .get_thermal_temperature_range = vega20_get_thermal_temperature_range,
+ .set_df_cstate = vega20_set_df_cstate,
+ .update_pcie_parameters = vega20_update_pcie_parameters
};
void vega20_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &vega20_ppt_funcs;
- smu_table->table_count = TABLE_COUNT;
}
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
index c3d29c0b051b..f0ba26e282c3 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
@@ -106,6 +106,23 @@ static void dump_block_header(struct seq_file *sf, void __iomem *reg)
i, hdr.output_ids[i]);
}
+/* On D71, we are using the global line size. From D32, every component has
+ * a line size register to indicate the FIFO size.
+ */
+static u32 __get_blk_line_size(struct d71_dev *d71, u32 __iomem *reg,
+ u32 max_default)
+{
+ if (!d71->periph_addr)
+ max_default = malidp_read32(reg, BLK_MAX_LINE_SIZE);
+
+ return max_default;
+}
+
+static u32 get_blk_line_size(struct d71_dev *d71, u32 __iomem *reg)
+{
+ return __get_blk_line_size(d71, reg, d71->max_line_size);
+}
+
static u32 to_rot_ctrl(u32 rot)
{
u32 lr_ctrl = 0;
@@ -332,7 +349,56 @@ static void d71_layer_dump(struct komeda_component *c, struct seq_file *sf)
seq_printf(sf, "%sAD_V_CROP:\t\t0x%X\n", prefix, v[2]);
}
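+/*
+ * Validate the plane state against the layer line size limits: AFBC
+ * superblocks other than 32x8 halve the usable line size, and
+ * YUV420_10BIT/X0L2 inputs wider than 2046 pixels carry extra
+ * alignment constraints.
+ */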
+static int d71_layer_validate(struct komeda_component *c,
+ struct komeda_component_state *state)
+{
+ struct komeda_layer_state *st = to_layer_st(state);
+ struct komeda_layer *layer = to_layer(c);
+ struct drm_plane_state *plane_st;
+ struct drm_framebuffer *fb;
+ u32 fourcc, line_sz, max_line_sz;
+
+ plane_st = drm_atomic_get_new_plane_state(state->obj.state,
+ state->plane);
+ fb = plane_st->fb;
+ fourcc = fb->format->format;
+
+ if (drm_rotation_90_or_270(st->rot))
+ line_sz = st->vsize - st->afbc_crop_t - st->afbc_crop_b;
+ else
+ line_sz = st->hsize - st->afbc_crop_l - st->afbc_crop_r;
+
+ if (fb->modifier) {
+ if ((fb->modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) ==
+ AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
+ max_line_sz = layer->line_sz;
+ else
+ max_line_sz = layer->line_sz / 2;
+
+ if (line_sz > max_line_sz) {
+			DRM_DEBUG_ATOMIC("afbc request line_sz: %d exceeds the max afbc line_sz: %d.\n",
+ line_sz, max_line_sz);
+ return -EINVAL;
+ }
+ }
+
+ if (fourcc == DRM_FORMAT_YUV420_10BIT && line_sz > 2046 && (st->afbc_crop_l % 4)) {
+		DRM_DEBUG_ATOMIC("YUV420_10BIT input_hsize: %d exceeds the max size 2046.\n",
+ line_sz);
+ return -EINVAL;
+ }
+
+ if (fourcc == DRM_FORMAT_X0L2 && line_sz > 2046 && (st->addr[0] % 16)) {
+		DRM_DEBUG_ATOMIC("X0L2 input_hsize: %d exceeds the max size 2046.\n",
+ line_sz);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct komeda_component_funcs d71_layer_funcs = {
+ .validate = d71_layer_validate,
.update = d71_layer_update,
.disable = d71_layer_disable,
.dump_register = d71_layer_dump,
@@ -365,7 +431,28 @@ static int d71_layer_init(struct d71_dev *d71,
else
layer->layer_type = KOMEDA_FMT_SIMPLE_LAYER;
- set_range(&layer->hsize_in, 4, d71->max_line_size);
+ if (!d71->periph_addr) {
+ /* D32 or newer product */
+ layer->line_sz = malidp_read32(reg, BLK_MAX_LINE_SIZE);
+ layer->yuv_line_sz = L_INFO_YUV_MAX_LINESZ(layer_info);
+ } else if (d71->max_line_size > 2048) {
+ /* D71 4K */
+ layer->line_sz = d71->max_line_size;
+ layer->yuv_line_sz = layer->line_sz / 2;
+ } else {
+ /* D71 2K */
+ if (layer->layer_type == KOMEDA_FMT_RICH_LAYER) {
+			/* the rich layer uses the 4K configuration */
+ layer->line_sz = d71->max_line_size * 2;
+ layer->yuv_line_sz = layer->line_sz / 2;
+ } else {
+ layer->line_sz = d71->max_line_size;
+ layer->yuv_line_sz = 0;
+ }
+ }
+
+ set_range(&layer->hsize_in, 4, layer->line_sz);
+
set_range(&layer->vsize_in, 4, d71->max_vsize);
malidp_write32(reg, LAYER_PALPHA, D71_PALPHA_DEF_MAP);
@@ -456,9 +543,11 @@ static int d71_wb_layer_init(struct d71_dev *d71,
wb_layer = to_layer(c);
wb_layer->layer_type = KOMEDA_FMT_WB_LAYER;
+ wb_layer->line_sz = get_blk_line_size(d71, reg);
+ wb_layer->yuv_line_sz = wb_layer->line_sz;
- set_range(&wb_layer->hsize_in, D71_MIN_LINE_SIZE, d71->max_line_size);
- set_range(&wb_layer->vsize_in, D71_MIN_VERTICAL_SIZE, d71->max_vsize);
+ set_range(&wb_layer->hsize_in, 64, wb_layer->line_sz);
+ set_range(&wb_layer->vsize_in, 64, d71->max_vsize);
return 0;
}
@@ -595,8 +684,8 @@ static int d71_compiz_init(struct d71_dev *d71,
compiz = to_compiz(c);
- set_range(&compiz->hsize, D71_MIN_LINE_SIZE, d71->max_line_size);
- set_range(&compiz->vsize, D71_MIN_VERTICAL_SIZE, d71->max_vsize);
+ set_range(&compiz->hsize, 64, get_blk_line_size(d71, reg));
+ set_range(&compiz->vsize, 64, d71->max_vsize);
return 0;
}
@@ -703,7 +792,7 @@ static void d71_scaler_update(struct komeda_component *c,
static void d71_scaler_dump(struct komeda_component *c, struct seq_file *sf)
{
- u32 v[9];
+ u32 v[10];
dump_block_header(sf, c->reg);
@@ -723,6 +812,18 @@ static void d71_scaler_dump(struct komeda_component *c, struct seq_file *sf)
seq_printf(sf, "SC_H_DELTA_PH:\t\t0x%X\n", v[6]);
seq_printf(sf, "SC_V_INIT_PH:\t\t0x%X\n", v[7]);
seq_printf(sf, "SC_V_DELTA_PH:\t\t0x%X\n", v[8]);
+
+ get_values_from_reg(c->reg, 0x130, 10, v);
+ seq_printf(sf, "SC_ENH_LIMITS:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "SC_ENH_COEFF0:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "SC_ENH_COEFF1:\t\t0x%X\n", v[2]);
+ seq_printf(sf, "SC_ENH_COEFF2:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "SC_ENH_COEFF3:\t\t0x%X\n", v[4]);
+ seq_printf(sf, "SC_ENH_COEFF4:\t\t0x%X\n", v[5]);
+ seq_printf(sf, "SC_ENH_COEFF5:\t\t0x%X\n", v[6]);
+ seq_printf(sf, "SC_ENH_COEFF6:\t\t0x%X\n", v[7]);
+ seq_printf(sf, "SC_ENH_COEFF7:\t\t0x%X\n", v[8]);
+ seq_printf(sf, "SC_ENH_COEFF8:\t\t0x%X\n", v[9]);
}
static const struct komeda_component_funcs d71_scaler_funcs = {
@@ -753,7 +854,7 @@ static int d71_scaler_init(struct d71_dev *d71,
}
scaler = to_scaler(c);
- set_range(&scaler->hsize, 4, 2048);
+ set_range(&scaler->hsize, 4, __get_blk_line_size(d71, reg, 2048));
set_range(&scaler->vsize, 4, 4096);
scaler->max_downscaling = 6;
scaler->max_upscaling = 64;
@@ -862,7 +963,7 @@ static int d71_splitter_init(struct d71_dev *d71,
splitter = to_splitter(c);
- set_range(&splitter->hsize, 4, d71->max_line_size);
+ set_range(&splitter->hsize, 4, get_blk_line_size(d71, reg));
set_range(&splitter->vsize, 4, d71->max_vsize);
return 0;
@@ -933,7 +1034,8 @@ static int d71_merger_init(struct d71_dev *d71,
merger = to_merger(c);
- set_range(&merger->hsize_merged, 4, 4032);
+ set_range(&merger->hsize_merged, 4,
+ __get_blk_line_size(d71, reg, 4032));
set_range(&merger->vsize_merged, 4, 4096);
return 0;
@@ -944,13 +1046,26 @@ static void d71_improc_update(struct komeda_component *c,
{
struct komeda_improc_state *st = to_improc_st(state);
u32 __iomem *reg = c->reg;
- u32 index;
+ u32 index, mask = 0, ctrl = 0;
for_each_changed_input(state, index)
malidp_write32(reg, BLK_INPUT_ID0 + index * 4,
to_d71_input_id(state, index));
malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
+ malidp_write32(reg, IPS_DEPTH, st->color_depth);
+
+ mask |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420;
+
+ /* config color format */
+ if (st->color_format == DRM_COLOR_FORMAT_YCRCB420)
+ ctrl |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420;
+ else if (st->color_format == DRM_COLOR_FORMAT_YCRCB422)
+ ctrl |= IPS_CTRL_YUV | IPS_CTRL_CHD422;
+ else if (st->color_format == DRM_COLOR_FORMAT_YCRCB444)
+ ctrl |= IPS_CTRL_YUV;
+
+ malidp_write32_mask(reg, BLK_CONTROL, mask, ctrl);
}
static void d71_improc_dump(struct komeda_component *c, struct seq_file *sf)
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
index 2d5e6d00b42c..1727dc993909 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
@@ -10,6 +10,7 @@
/* Common block registers offset */
#define BLK_BLOCK_INFO 0x000
#define BLK_PIPELINE_INFO 0x004
+#define BLK_MAX_LINE_SIZE 0x008
#define BLK_VALID_INPUT_ID0 0x020
#define BLK_OUTPUT_ID0 0x060
#define BLK_INPUT_ID0 0x080
@@ -321,6 +322,7 @@
#define L_INFO_RF BIT(0)
#define L_INFO_CM BIT(1)
#define L_INFO_ABUF_SIZE(x) (((x) >> 4) & 0x7)
+#define L_INFO_YUV_MAX_LINESZ(x) (((x) >> 16) & 0xFFFF)
/* Scaler registers */
#define SC_COEFFTAB 0x0DC
@@ -494,13 +496,6 @@ enum d71_blk_type {
#define D71_DEFAULT_PREPRETCH_LINE 5
#define D71_BUS_WIDTH_16_BYTES 16
-#define D71_MIN_LINE_SIZE 64
-#define D71_MIN_VERTICAL_SIZE 64
-#define D71_SC_MIN_LIN_SIZE 4
-#define D71_SC_MIN_VERTICAL_SIZE 4
-#define D71_SC_MAX_LIN_SIZE 2048
-#define D71_SC_MAX_VERTICAL_SIZE 4096
-
#define D71_SC_MAX_UPSCALING 64
#define D71_SC_MAX_DOWNSCALING 6
#define D71_SC_SPLIT_OVERLAP 8
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 75263d8cd0bd..252015210fbc 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -17,6 +17,33 @@
#include "komeda_dev.h"
#include "komeda_kms.h"
+void komeda_crtc_get_color_config(struct drm_crtc_state *crtc_st,
+ u32 *color_depths, u32 *color_formats)
+{
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_st;
+ u32 conn_color_formats = ~0u;
+ int i, min_bpc = 31, conn_bpc = 0;
+
+ for_each_new_connector_in_state(crtc_st->state, conn, conn_st, i) {
+ if (conn_st->crtc != crtc_st->crtc)
+ continue;
+
+ conn_bpc = conn->display_info.bpc ? conn->display_info.bpc : 8;
+ conn_color_formats &= conn->display_info.color_formats;
+
+ if (conn_bpc < min_bpc)
+ min_bpc = conn_bpc;
+ }
+
+	/* no connector configured a color_format, use RGB444 as default */
+ if (!conn_color_formats)
+ conn_color_formats = DRM_COLOR_FORMAT_RGB444;
+
+ *color_depths = GENMASK(min_bpc, 0);
+ *color_formats = conn_color_formats;
+}
+
static void komeda_crtc_update_clock_ratio(struct komeda_crtc_state *kcrtc_st)
{
u64 pxlclk, aclk;
@@ -296,7 +323,7 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc,
struct komeda_crtc_state *old_st = to_kcrtc_st(old);
struct komeda_pipeline *master = kcrtc->master;
struct komeda_pipeline *slave = kcrtc->slave;
- struct completion *disable_done = &crtc->state->commit->flip_done;
+ struct completion *disable_done;
bool needs_phase2 = false;
DRM_DEBUG_ATOMIC("CRTC%d_DISABLE: active_pipes: 0x%x, affected: 0x%x\n",
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
index 45c498e15e7a..456f3c435719 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
@@ -166,6 +166,8 @@ static inline bool has_flip_h(u32 rot)
return !!(rotation & DRM_MODE_REFLECT_X);
}
+void komeda_crtc_get_color_config(struct drm_crtc_state *crtc_st,
+ u32 *color_depths, u32 *color_formats);
unsigned long komeda_crtc_get_aclk(struct komeda_crtc_state *kcrtc_st);
int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index b322f52ba8f2..bd6ca7c87037 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -227,6 +227,8 @@ struct komeda_layer {
/* accepted h/v input range before rotation */
struct malidp_range hsize_in, vsize_in;
u32 layer_type; /* RICH, SIMPLE or WB */
+ u32 line_sz;
+ u32 yuv_line_sz; /* maximum line size for YUV422 and YUV420 */
u32 supported_rots;
/* komeda supports layer split which splits a whole image to two parts
* left and right and handle them by two individual layer processors
@@ -323,6 +325,7 @@ struct komeda_improc {
struct komeda_improc_state {
struct komeda_component_state base;
+ u8 color_format, color_depth;
u16 hsize, vsize;
};
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index 0ba9c6aa3708..42bdc63dcffa 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -285,6 +285,7 @@ komeda_layer_check_cfg(struct komeda_layer *layer,
struct komeda_data_flow_cfg *dflow)
{
u32 src_x, src_y, src_w, src_h;
+ u32 line_sz, max_line_sz;
if (!komeda_fb_is_layer_supported(kfb, layer->layer_type, dflow->rot))
return -EINVAL;
@@ -314,6 +315,22 @@ komeda_layer_check_cfg(struct komeda_layer *layer,
return -EINVAL;
}
+ if (drm_rotation_90_or_270(dflow->rot))
+ line_sz = dflow->in_h;
+ else
+ line_sz = dflow->in_w;
+
+ if (kfb->base.format->hsub > 1)
+ max_line_sz = layer->yuv_line_sz;
+ else
+ max_line_sz = layer->line_sz;
+
+ if (line_sz > max_line_sz) {
+ DRM_DEBUG_ATOMIC("Required line_sz: %d exceeds the max size %d\n",
+ line_sz, max_line_sz);
+ return -EINVAL;
+ }
+
return 0;
}
@@ -743,6 +760,7 @@ komeda_improc_validate(struct komeda_improc *improc,
struct komeda_data_flow_cfg *dflow)
{
struct drm_crtc *crtc = kcrtc_st->base.crtc;
+ struct drm_crtc_state *crtc_st = &kcrtc_st->base;
struct komeda_component_state *c_st;
struct komeda_improc_state *st;
@@ -756,6 +774,34 @@ komeda_improc_validate(struct komeda_improc *improc,
st->hsize = dflow->in_w;
st->vsize = dflow->in_h;
+ if (drm_atomic_crtc_needs_modeset(crtc_st)) {
+ u32 output_depths, output_formats;
+ u32 avail_depths, avail_formats;
+
+ komeda_crtc_get_color_config(crtc_st, &output_depths,
+ &output_formats);
+
+ avail_depths = output_depths & improc->supported_color_depths;
+ if (avail_depths == 0) {
+ DRM_DEBUG_ATOMIC("No available color depths, conn depths: 0x%x & display: 0x%x\n",
+ output_depths,
+ improc->supported_color_depths);
+ return -EINVAL;
+ }
+
+ avail_formats = output_formats &
+ improc->supported_color_formats;
+ if (!avail_formats) {
+ DRM_DEBUG_ATOMIC("No available color_formats, conn formats 0x%x & display: 0x%x\n",
+ output_formats,
+ improc->supported_color_formats);
+ return -EINVAL;
+ }
+
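+		/* pick the deepest common depth and the lowest common format bit */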
+ st->color_depth = __fls(avail_depths);
+ st->color_format = BIT(__ffs(avail_formats));
+ }
+
komeda_component_add_input(&st->base, &dflow->input, 0);
komeda_component_set_output(&dflow->input, &improc->base, 0);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index b72840c06ab7..e465cc4879c9 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -141,6 +141,7 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
struct komeda_dev *mdev = kms->base.dev_private;
struct komeda_wb_connector *kwb_conn;
struct drm_writeback_connector *wb_conn;
+ struct drm_display_info *info;
u32 *formats, n_formats = 0;
int err;
@@ -172,6 +173,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
drm_connector_helper_add(&wb_conn->base, &komeda_wb_conn_helper_funcs);
+ info = &kwb_conn->base.base.display_info;
+ info->bpc = __fls(kcrtc->master->improc->supported_color_depths);
+ info->color_formats = kcrtc->master->improc->supported_color_formats;
+
kcrtc->wb_conn = kwb_conn;
return 0;
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 333b88a5efb0..37d92a06318e 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -368,7 +368,7 @@ malidp_verify_afbc_framebuffer(struct drm_device *dev, struct drm_file *file,
return false;
}
-struct drm_framebuffer *
+static struct drm_framebuffer *
malidp_fb_create(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
@@ -491,9 +491,9 @@ void malidp_error(struct malidp_drm *malidp,
spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
}
-void malidp_error_stats_dump(const char *prefix,
- struct malidp_error_stats error_stats,
- struct seq_file *m)
+static void malidp_error_stats_dump(const char *prefix,
+ struct malidp_error_stats error_stats,
+ struct seq_file *m)
{
seq_printf(m, "[%s] num_errors : %d\n", prefix,
error_stats.num_errors);
@@ -665,7 +665,7 @@ static ssize_t core_id_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id);
}
-DEVICE_ATTR_RO(core_id);
+static DEVICE_ATTR_RO(core_id);
static int malidp_init_sysfs(struct device *dev)
{
@@ -817,6 +817,12 @@ static int malidp_bind(struct device *dev)
malidp->core_id = version;
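+	/* optional DT-supplied ARQOS value; defaults to 0 when absent */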
+ ret = of_property_read_u32(dev->of_node,
+ "arm,malidp-arqos-value",
+ &hwdev->arqos_value);
+ if (ret)
+ hwdev->arqos_value = 0x0;
+
/* set the number of lines used for output of RGB data */
ret = of_property_read_u8_array(dev->of_node,
"arm,malidp-output-port-lines",
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index bd8265f02e0b..ca570b135478 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -379,6 +379,15 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *
malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
else
malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
+
+ /*
+	 * Program the RQoS register to avoid the flicker issue seen at
+	 * high resolutions on the LS1028A.
+ */
+ if (hwdev->arqos_value) {
+ val = hwdev->arqos_value;
+ malidp_hw_setbits(hwdev, val, MALIDP500_RQOS_QUALITY);
+ }
}
int malidp_format_get_bpp(u32 fmt)
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index 968a65eed371..e4c36bc90bda 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -251,6 +251,9 @@ struct malidp_hw_device {
/* size of memory used for rotating layers, up to two banks available */
u32 rotation_memory[2];
+
+	/* priority level of RQOS register used for driving the ARQOS signal */
+ u32 arqos_value;
};
static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 993031542fa1..514c50dcb74d 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -210,6 +210,16 @@
#define MALIDP500_CONFIG_VALID 0x00f00
#define MALIDP500_CONFIG_ID 0x00fd4
+/*
+ * The quality of service (QoS) register on the DP500. RQOS register values
+ * are driven by the ARQOS signal, using AXI transactions, dependent on the
+ * FIFO input level.
+ * The RQOS register can also set QoS levels for:
+ * - RED_ARQOS @ A 4-bit signal value for close to underflow conditions
+ * - GREEN_ARQOS @ A 4-bit signal value for normal conditions
+ */
+#define MALIDP500_RQOS_QUALITY 0x00500
+
/* register offsets and bits specific to DP550/DP650 */
#define MALIDP550_ADDR_SPACE_SIZE 0x10000
#define MALIDP550_DE_CONTROL 0x00010
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index e0e8770462bc..1f17794b0890 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -200,10 +200,7 @@ static struct pci_driver ast_pci_driver = {
.driver.pm = &ast_pm_ops,
};
-static const struct file_operations ast_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(ast_fops);
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 89f5a756fa37..034f202dfe8f 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -601,7 +601,6 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
struct drm_framebuffer *fb = state->base.fb;
const struct drm_display_mode *mode;
struct drm_crtc_state *crtc_state;
- unsigned int tmp;
int ret;
int i;
@@ -694,9 +693,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
* Swap width and size in case of 90 or 270 degrees rotation
*/
if (drm_rotation_90_or_270(state->base.rotation)) {
- tmp = state->src_w;
- state->src_w = state->src_h;
- state->src_h = tmp;
+ swap(state->src_w, state->src_h);
}
if (!desc->layout.size &&
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 3b9b0d9bbc14..10460878414e 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -58,10 +58,7 @@ err:
return ret;
}
-static const struct file_operations bochs_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(bochs_fops);
static struct drm_driver bochs_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 02a9c1ed165b..3f0006c2470d 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -69,33 +69,11 @@ static void bochs_pipe_update(struct drm_simple_display_pipe *pipe,
}
}
-static int bochs_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *new_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!new_state->fb)
- return 0;
- gbo = drm_gem_vram_of_gem(new_state->fb->obj[0]);
- return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
-}
-
-static void bochs_pipe_cleanup_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!old_state->fb)
- return;
- gbo = drm_gem_vram_of_gem(old_state->fb->obj[0]);
- drm_gem_vram_unpin(gbo);
-}
-
static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = {
.enable = bochs_pipe_enable,
.update = bochs_pipe_update,
- .prepare_fb = bochs_pipe_prepare_fb,
- .cleanup_fb = bochs_pipe_cleanup_fb,
+ .prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb,
+ .cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb,
};
static int bochs_connector_get_modes(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 1cc9f502c1f2..34362976cd6f 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -87,8 +87,7 @@ config DRM_SIL_SII8620
depends on OF
select DRM_KMS_HELPER
imply EXTCON
- select INPUT
- select RC_CORE
+ depends on RC_CORE || !RC_CORE
help
Silicon Image SII8620 HDMI/MHL bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index 31118d385580..274989f96a91 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -39,12 +39,20 @@
#define AUX_CH_BUFFER_SIZE 16
#define AUX_WAIT_TIMEOUT_MS 15
-static const u8 anx78xx_i2c_addresses[] = {
- [I2C_IDX_TX_P0] = TX_P0,
- [I2C_IDX_TX_P1] = TX_P1,
- [I2C_IDX_TX_P2] = TX_P2,
- [I2C_IDX_RX_P0] = RX_P0,
- [I2C_IDX_RX_P1] = RX_P1,
+static const u8 anx7808_i2c_addresses[] = {
+ [I2C_IDX_TX_P0] = 0x78,
+ [I2C_IDX_TX_P1] = 0x7a,
+ [I2C_IDX_TX_P2] = 0x72,
+ [I2C_IDX_RX_P0] = 0x7e,
+ [I2C_IDX_RX_P1] = 0x80,
+};
+
+static const u8 anx781x_i2c_addresses[] = {
+ [I2C_IDX_TX_P0] = 0x70,
+ [I2C_IDX_TX_P1] = 0x7a,
+ [I2C_IDX_TX_P2] = 0x72,
+ [I2C_IDX_RX_P0] = 0x7e,
+ [I2C_IDX_RX_P1] = 0x80,
};
struct anx78xx_platform_data {
@@ -63,7 +71,6 @@ struct anx78xx {
struct i2c_client *client;
struct edid *edid;
struct drm_connector connector;
- struct drm_dp_link link;
struct anx78xx_platform_data pdata;
struct mutex lock;
@@ -740,7 +747,7 @@ static int anx78xx_init_pdata(struct anx78xx *anx78xx)
static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
{
- u8 dp_bw, value;
+ u8 dp_bw, dpcd[2];
int err;
err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG,
@@ -793,18 +800,34 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
if (err)
return err;
- /* Check link capabilities */
- err = drm_dp_link_probe(&anx78xx->aux, &anx78xx->link);
- if (err < 0) {
- DRM_ERROR("Failed to probe link capabilities: %d\n", err);
- return err;
- }
+ /*
+ * Power up the sink (DP_SET_POWER register is only available on DPCD
+ * v1.1 and later).
+ */
+ if (anx78xx->dpcd[DP_DPCD_REV] >= 0x11) {
+ err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SET_POWER, &dpcd[0]);
+ if (err < 0) {
+ DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
+ err);
+ return err;
+ }
- /* Power up the sink */
- err = drm_dp_link_power_up(&anx78xx->aux, &anx78xx->link);
- if (err < 0) {
- DRM_ERROR("Failed to power up DisplayPort link: %d\n", err);
- return err;
+ dpcd[0] &= ~DP_SET_POWER_MASK;
+ dpcd[0] |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_SET_POWER, dpcd[0]);
+ if (err < 0) {
+ DRM_ERROR("Failed to power up DisplayPort link: %d\n",
+ err);
+ return err;
+ }
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must
+ * exit the power saving state within 1 ms" (Section 2.5.3.1,
+		 * Table 5-52, "Sink Control Field" (register 0x600)).
+ */
+ usleep_range(1000, 2000);
}
/* Possibly enable downspread on the sink */
@@ -843,15 +866,22 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
if (err)
return err;
- value = drm_dp_link_rate_to_bw_code(anx78xx->link.rate);
+ dpcd[0] = drm_dp_max_link_rate(anx78xx->dpcd);
+ dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
- SP_DP_MAIN_LINK_BW_SET_REG, value);
+ SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
if (err)
return err;
- err = drm_dp_link_configure(&anx78xx->aux, &anx78xx->link);
+ dpcd[1] = drm_dp_max_lane_count(anx78xx->dpcd);
+
+ if (drm_dp_enhanced_frame_cap(anx78xx->dpcd))
+ dpcd[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ err = drm_dp_dpcd_write(&anx78xx->aux, DP_LINK_BW_SET, dpcd,
+ sizeof(dpcd));
if (err < 0) {
- DRM_ERROR("Failed to configure DisplayPort link: %d\n", err);
+ DRM_ERROR("Failed to configure link: %d\n", err);
return err;
}
@@ -1316,6 +1346,7 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
struct anx78xx *anx78xx;
struct anx78xx_platform_data *pdata;
unsigned int i, idl, idh, version;
+ const u8 *i2c_addresses;
bool found = false;
int err;
@@ -1355,15 +1386,16 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
}
/* Map slave addresses of ANX7814 */
+ i2c_addresses = device_get_match_data(&client->dev);
for (i = 0; i < I2C_NUM_ADDRESSES; i++) {
struct i2c_client *i2c_dummy;
i2c_dummy = i2c_new_dummy_device(client->adapter,
- anx78xx_i2c_addresses[i] >> 1);
+ i2c_addresses[i] >> 1);
if (IS_ERR(i2c_dummy)) {
err = PTR_ERR(i2c_dummy);
DRM_ERROR("Failed to reserve I2C bus %02x: %d\n",
- anx78xx_i2c_addresses[i], err);
+ i2c_addresses[i], err);
goto err_unregister_i2c;
}
@@ -1373,7 +1405,7 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
if (IS_ERR(anx78xx->map[i])) {
err = PTR_ERR(anx78xx->map[i]);
DRM_ERROR("Failed regmap initialization %02x\n",
- anx78xx_i2c_addresses[i]);
+ i2c_addresses[i]);
goto err_unregister_i2c;
}
}
@@ -1472,10 +1504,10 @@ MODULE_DEVICE_TABLE(i2c, anx78xx_id);
#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id anx78xx_match_table[] = {
- { .compatible = "analogix,anx7808", },
- { .compatible = "analogix,anx7812", },
- { .compatible = "analogix,anx7814", },
- { .compatible = "analogix,anx7818", },
+ { .compatible = "analogix,anx7808", .data = anx7808_i2c_addresses },
+ { .compatible = "analogix,anx7812", .data = anx781x_i2c_addresses },
+ { .compatible = "analogix,anx7814", .data = anx781x_i2c_addresses },
+ { .compatible = "analogix,anx7818", .data = anx781x_i2c_addresses },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, anx78xx_match_table);
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.h b/drivers/gpu/drm/bridge/analogix-anx78xx.h
index 25e063bcecbc..55d6c2109740 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.h
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.h
@@ -6,15 +6,8 @@
#ifndef __ANX78xx_H
#define __ANX78xx_H
-#define TX_P0 0x70
-#define TX_P1 0x7a
-#define TX_P2 0x72
-
-#define RX_P0 0x7e
-#define RX_P1 0x80
-
/***************************************************************/
-/* Register definition of device address 0x7e */
+/* Register definitions for RX_P0 */
/***************************************************************/
/*
@@ -171,7 +164,7 @@
#define SP_VSI_RCVD BIT(1)
/***************************************************************/
-/* Register definition of device address 0x80 */
+/* Register definitions for RX_P1 */
/***************************************************************/
/* HDCP BCAPS Shadow Register */
@@ -217,7 +210,7 @@
#define SP_SET_AVMUTE BIT(0)
/***************************************************************/
-/* Register definition of device address 0x70 */
+/* Register definitions for TX_P0 */
/***************************************************************/
/* HDCP Status Register */
@@ -451,7 +444,7 @@
#define SP_DP_BUF_DATA0_REG 0xf0
/***************************************************************/
-/* Register definition of device address 0x72 */
+/* Register definitions for TX_P2 */
/***************************************************************/
/*
@@ -674,7 +667,7 @@
#define SP_INT_CTRL_REG 0xff
/***************************************************************/
-/* Register definition of device address 0x7a */
+/* Register definitions for TX_P1 */
/***************************************************************/
/* DP TX Link Training Control Register */
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index ad00d841ed9e..f81f81b7051f 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -842,39 +842,28 @@ static int sii9234_init_resources(struct sii9234 *ctx,
ctx->client[I2C_MHL] = client;
- ctx->client[I2C_TPI] = i2c_new_dummy(adapter, I2C_TPI_ADDR);
- if (!ctx->client[I2C_TPI]) {
+ ctx->client[I2C_TPI] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ I2C_TPI_ADDR);
+ if (IS_ERR(ctx->client[I2C_TPI])) {
dev_err(ctx->dev, "failed to create TPI client\n");
- return -ENODEV;
+ return PTR_ERR(ctx->client[I2C_TPI]);
}
- ctx->client[I2C_HDMI] = i2c_new_dummy(adapter, I2C_HDMI_ADDR);
- if (!ctx->client[I2C_HDMI]) {
+ ctx->client[I2C_HDMI] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ I2C_HDMI_ADDR);
+ if (IS_ERR(ctx->client[I2C_HDMI])) {
dev_err(ctx->dev, "failed to create HDMI RX client\n");
- goto fail_tpi;
+ return PTR_ERR(ctx->client[I2C_HDMI]);
}
- ctx->client[I2C_CBUS] = i2c_new_dummy(adapter, I2C_CBUS_ADDR);
- if (!ctx->client[I2C_CBUS]) {
+ ctx->client[I2C_CBUS] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ I2C_CBUS_ADDR);
+ if (IS_ERR(ctx->client[I2C_CBUS])) {
dev_err(ctx->dev, "failed to create CBUS client\n");
- goto fail_hdmi;
+ return PTR_ERR(ctx->client[I2C_CBUS]);
}
return 0;
-
-fail_hdmi:
- i2c_unregister_device(ctx->client[I2C_HDMI]);
-fail_tpi:
- i2c_unregister_device(ctx->client[I2C_TPI]);
-
- return -ENODEV;
-}
-
-static void sii9234_deinit_resources(struct sii9234 *ctx)
-{
- i2c_unregister_device(ctx->client[I2C_CBUS]);
- i2c_unregister_device(ctx->client[I2C_HDMI]);
- i2c_unregister_device(ctx->client[I2C_TPI]);
}
static inline struct sii9234 *bridge_to_sii9234(struct drm_bridge *bridge)
@@ -951,7 +940,6 @@ static int sii9234_remove(struct i2c_client *client)
sii9234_cable_out(ctx);
drm_bridge_remove(&ctx->bridge);
- sii9234_deinit_resources(ctx);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 14643923a721..4c0eef406eb1 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -1760,10 +1760,8 @@ static bool sii8620_rcp_consume(struct sii8620 *ctx, u8 scancode)
scancode &= MHL_RCP_KEY_ID_MASK;
- if (!ctx->rc_dev) {
- dev_dbg(ctx->dev, "RCP input device not initialized\n");
+ if (!IS_ENABLED(CONFIG_RC_CORE) || !ctx->rc_dev)
return false;
- }
if (pressed)
rc_keydown(ctx->rc_dev, RC_PROTO_CEC, scancode, 0);
@@ -2100,6 +2098,9 @@ static void sii8620_init_rcp_input_dev(struct sii8620 *ctx)
struct rc_dev *rc_dev;
int ret;
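+	/* rc-core is optional now; skip RCP input device setup without it */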
+ if (!IS_ENABLED(CONFIG_RC_CORE))
+ return;
+
rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!rc_dev) {
dev_err(ctx->dev, "Failed to allocate RC device\n");
@@ -2214,6 +2215,9 @@ static void sii8620_detach(struct drm_bridge *bridge)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
+ if (!IS_ENABLED(CONFIG_RC_CORE))
+ return;
+
rc_unregister_device(ctx->rc_dev);
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index a15fbf71b9d7..dbe38a54870b 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -25,6 +25,7 @@
#include <uapi/linux/videodev2.h>
#include <drm/bridge/dw_hdmi.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
@@ -1743,6 +1744,41 @@ static void hdmi_config_vendor_specific_infoframe(struct dw_hdmi *hdmi,
HDMI_FC_DATAUTO0_VSD_MASK);
}
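+/*
+ * Build the Dynamic Range and Mastering (DRM) infoframe from the
+ * connector's HDR metadata and enable its transmission.
+ */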
+static void hdmi_config_drm_infoframe(struct dw_hdmi *hdmi)
+{
+ const struct drm_connector_state *conn_state = hdmi->connector.state;
+ struct hdmi_drm_infoframe frame;
+ u8 buffer[30];
+ ssize_t err;
+ int i;
+
+ if (!hdmi->plat_data->use_drm_infoframe)
+ return;
+
+ hdmi_modb(hdmi, HDMI_FC_PACKET_TX_EN_DRM_DISABLE,
+ HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN);
+
+ err = drm_hdmi_infoframe_set_hdr_metadata(&frame, conn_state);
+ if (err < 0)
+ return;
+
+ err = hdmi_drm_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "Failed to pack drm infoframe: %zd\n", err);
+ return;
+ }
+
+ hdmi_writeb(hdmi, frame.version, HDMI_FC_DRM_HB0);
+ hdmi_writeb(hdmi, frame.length, HDMI_FC_DRM_HB1);
+
+ for (i = 0; i < frame.length; i++)
+ hdmi_writeb(hdmi, buffer[4 + i], HDMI_FC_DRM_PB0 + i);
+
+ hdmi_writeb(hdmi, 1, HDMI_FC_DRM_UP);
+ hdmi_modb(hdmi, HDMI_FC_PACKET_TX_EN_DRM_ENABLE,
+ HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN);
+}
+
static void hdmi_av_composer(struct dw_hdmi *hdmi,
const struct drm_display_mode *mode)
{
@@ -2054,7 +2090,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
/* HDMI Initialization Step E - Configure audio */
hdmi_clk_regenerator_update_pixel_clock(hdmi);
- hdmi_enable_audio_clk(hdmi, true);
+ hdmi_enable_audio_clk(hdmi, hdmi->audio_enable);
}
/* not for DVI mode */
@@ -2064,6 +2100,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
/* HDMI Initialization Step F - Configure AVI InfoFrame */
hdmi_config_AVI(hdmi, mode);
hdmi_config_vendor_specific_infoframe(hdmi, mode);
+ hdmi_config_drm_infoframe(hdmi);
} else {
dev_dbg(hdmi->dev, "%s DVI mode\n", __func__);
}
@@ -2230,6 +2267,45 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
return ret;
}
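+/*
+ * Two connector states carry equal HDR metadata when both blobs are
+ * absent, or both are present with identical length and payload.
+ */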
+static bool hdr_metadata_equal(const struct drm_connector_state *old_state,
+ const struct drm_connector_state *new_state)
+{
+ struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
+ struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
+
+ if (!old_blob || !new_blob)
+ return old_blob == new_blob;
+
+ if (old_blob->length != new_blob->length)
+ return false;
+
+ return !memcmp(old_blob->data, new_blob->data, old_blob->length);
+}
+
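+/*
+ * Force a mode change when the HDR output metadata differs so the DRM
+ * infoframe is reprogrammed during the commit.
+ */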
+static int dw_hdmi_connector_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *old_state =
+ drm_atomic_get_old_connector_state(state, connector);
+ struct drm_connector_state *new_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc *crtc = new_state->crtc;
+ struct drm_crtc_state *crtc_state;
+
+ if (!crtc)
+ return 0;
+
+ if (!hdr_metadata_equal(old_state, new_state)) {
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ crtc_state->mode_changed = true;
+ }
+
+ return 0;
+}
+
static void dw_hdmi_connector_force(struct drm_connector *connector)
{
struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
@@ -2254,6 +2330,7 @@ static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
.get_modes = dw_hdmi_connector_get_modes,
+ .atomic_check = dw_hdmi_connector_atomic_check,
};
static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
@@ -2274,6 +2351,10 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
DRM_MODE_CONNECTOR_HDMIA,
hdmi->ddc);
+ if (hdmi->version >= 0x200a && hdmi->plat_data->use_drm_infoframe)
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.hdr_output_metadata_property, 0);
+
drm_connector_attach_encoder(connector, encoder);
cec_fill_conn_info_from_drm(&conn_info, connector);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
index fcff5059db24..1999db05bc3b 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
@@ -254,6 +254,7 @@
#define HDMI_FC_POL2 0x10DB
#define HDMI_FC_PRCONF 0x10E0
#define HDMI_FC_SCRAMBLER_CTRL 0x10E1
+#define HDMI_FC_PACKET_TX_EN 0x10E3
#define HDMI_FC_GMD_STAT 0x1100
#define HDMI_FC_GMD_EN 0x1101
@@ -289,6 +290,37 @@
#define HDMI_FC_GMD_PB26 0x111F
#define HDMI_FC_GMD_PB27 0x1120
+#define HDMI_FC_DRM_UP 0x1167
+#define HDMI_FC_DRM_HB0 0x1168
+#define HDMI_FC_DRM_HB1 0x1169
+#define HDMI_FC_DRM_PB0 0x116A
+#define HDMI_FC_DRM_PB1 0x116B
+#define HDMI_FC_DRM_PB2 0x116C
+#define HDMI_FC_DRM_PB3 0x116D
+#define HDMI_FC_DRM_PB4 0x116E
+#define HDMI_FC_DRM_PB5 0x116F
+#define HDMI_FC_DRM_PB6 0x1170
+#define HDMI_FC_DRM_PB7 0x1171
+#define HDMI_FC_DRM_PB8 0x1172
+#define HDMI_FC_DRM_PB9 0x1173
+#define HDMI_FC_DRM_PB10 0x1174
+#define HDMI_FC_DRM_PB11 0x1175
+#define HDMI_FC_DRM_PB12 0x1176
+#define HDMI_FC_DRM_PB13 0x1177
+#define HDMI_FC_DRM_PB14 0x1178
+#define HDMI_FC_DRM_PB15 0x1179
+#define HDMI_FC_DRM_PB16 0x117A
+#define HDMI_FC_DRM_PB17 0x117B
+#define HDMI_FC_DRM_PB18 0x117C
+#define HDMI_FC_DRM_PB19 0x117D
+#define HDMI_FC_DRM_PB20 0x117E
+#define HDMI_FC_DRM_PB21 0x117F
+#define HDMI_FC_DRM_PB22 0x1180
+#define HDMI_FC_DRM_PB23 0x1181
+#define HDMI_FC_DRM_PB24 0x1182
+#define HDMI_FC_DRM_PB25 0x1183
+#define HDMI_FC_DRM_PB26 0x1184
+
#define HDMI_FC_DBGFORCE 0x1200
#define HDMI_FC_DBGAUD0CH0 0x1201
#define HDMI_FC_DBGAUD1CH0 0x1202
@@ -744,6 +776,11 @@ enum {
HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK = 0x0F,
HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET = 0,
+/* FC_PACKET_TX_EN field values */
+ HDMI_FC_PACKET_TX_EN_DRM_MASK = 0x80,
+ HDMI_FC_PACKET_TX_EN_DRM_ENABLE = 0x80,
+ HDMI_FC_PACKET_TX_EN_DRM_DISABLE = 0x00,
+
/* FC_AVICONF0-FC_AVICONF3 field values */
HDMI_FC_AVICONF0_PIX_FMT_MASK = 0x03,
HDMI_FC_AVICONF0_PIX_FMT_RGB = 0x00,
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 51664a2df731..8029478ffebb 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -229,7 +229,9 @@ static bool tc_test_pattern;
module_param_named(test, tc_test_pattern, bool, 0644);
struct tc_edp_link {
- struct drm_dp_link base;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ unsigned int rate;
+ u8 num_lanes;
u8 assr;
bool scrambler_dis;
bool spread;
@@ -438,9 +440,9 @@ static u32 tc_srcctrl(struct tc_data *tc)
reg |= DP0_SRCCTRL_SCRMBLDIS; /* Scrambler Disabled */
if (tc->link.spread)
reg |= DP0_SRCCTRL_SSCG; /* Spread Spectrum Enable */
- if (tc->link.base.num_lanes == 2)
+ if (tc->link.num_lanes == 2)
reg |= DP0_SRCCTRL_LANES_2; /* Two Main Channel Lanes */
- if (tc->link.base.rate != 162000)
+ if (tc->link.rate != 162000)
reg |= DP0_SRCCTRL_BW27; /* 2.7 Gbps link */
return reg;
}
@@ -663,23 +665,35 @@ err:
static int tc_get_display_props(struct tc_data *tc)
{
+ u8 revision, num_lanes;
+ unsigned int rate;
int ret;
u8 reg;
/* Read DP Rx Link Capability */
- ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
+ ret = drm_dp_dpcd_read(&tc->aux, DP_DPCD_REV, tc->link.dpcd,
+ DP_RECEIVER_CAP_SIZE);
if (ret < 0)
goto err_dpcd_read;
- if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) {
+
+ revision = tc->link.dpcd[DP_DPCD_REV];
+ rate = drm_dp_max_link_rate(tc->link.dpcd);
+ num_lanes = drm_dp_max_lane_count(tc->link.dpcd);
+
+ if (rate != 162000 && rate != 270000) {
dev_dbg(tc->dev, "Falling to 2.7 Gbps rate\n");
- tc->link.base.rate = 270000;
+ rate = 270000;
}
- if (tc->link.base.num_lanes > 2) {
+ tc->link.rate = rate;
+
+ if (num_lanes > 2) {
dev_dbg(tc->dev, "Falling to 2 lanes\n");
- tc->link.base.num_lanes = 2;
+ num_lanes = 2;
}
+ tc->link.num_lanes = num_lanes;
+
ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, &reg);
if (ret < 0)
goto err_dpcd_read;
@@ -697,11 +711,11 @@ static int tc_get_display_props(struct tc_data *tc)
tc->link.assr = reg & DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
dev_dbg(tc->dev, "DPCD rev: %d.%d, rate: %s, lanes: %d, framing: %s\n",
- tc->link.base.revision >> 4, tc->link.base.revision & 0x0f,
- (tc->link.base.rate == 162000) ? "1.62Gbps" : "2.7Gbps",
- tc->link.base.num_lanes,
- (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) ?
- "enhanced" : "non-enhanced");
+ revision >> 4, revision & 0x0f,
+ (tc->link.rate == 162000) ? "1.62Gbps" : "2.7Gbps",
+ tc->link.num_lanes,
+ drm_dp_enhanced_frame_cap(tc->link.dpcd) ?
+ "enhanced" : "default");
dev_dbg(tc->dev, "Downspread: %s, scrambler: %s\n",
tc->link.spread ? "0.5%" : "0.0%",
tc->link.scrambler_dis ? "disabled" : "enabled");
@@ -729,6 +743,8 @@ static int tc_set_video_mode(struct tc_data *tc,
int lower_margin = mode->vsync_start - mode->vdisplay;
int vsync_len = mode->vsync_end - mode->vsync_start;
u32 dp0_syncval;
+ u32 bits_per_pixel = 24;
+ u32 in_bw, out_bw;
/*
* Recommended maximum number of symbols transferred in a transfer unit:
@@ -736,7 +752,10 @@ static int tc_set_video_mode(struct tc_data *tc,
* (output active video bandwidth in bytes))
* Must be less than tu_size.
*/
- max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+
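+	/*
+	 * Scale the transfer unit fill by the ratio of input (pixel) to
+	 * output (link) bandwidth instead of assuming the maximum.
+	 */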
+ in_bw = mode->clock * bits_per_pixel / 8;
+ out_bw = tc->link.num_lanes * tc->link.rate;
+ max_tu_symbol = DIV_ROUND_UP(in_bw * TU_SIZE_RECOMMENDED, out_bw);
dev_dbg(tc->dev, "set mode %dx%d\n",
mode->hdisplay, mode->vdisplay);
@@ -897,7 +916,7 @@ static int tc_main_link_enable(struct tc_data *tc)
/* SSCG and BW27 on DP1 must be set to the same as on DP0 */
ret = regmap_write(tc->regmap, DP1_SRCCTRL,
(tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
- ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
+ ((tc->link.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
if (ret)
return ret;
@@ -907,7 +926,7 @@ static int tc_main_link_enable(struct tc_data *tc)
/* Setup Main Link */
dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
- if (tc->link.base.num_lanes == 2)
+ if (tc->link.num_lanes == 2)
dp_phy_ctrl |= PHY_2LANE;
ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
@@ -970,7 +989,13 @@ static int tc_main_link_enable(struct tc_data *tc)
}
/* Setup Link & DPRx Config for Training */
- ret = drm_dp_link_configure(aux, &tc->link.base);
+ tmp[0] = drm_dp_link_rate_to_bw_code(tc->link.rate);
+ tmp[1] = tc->link.num_lanes;
+
+ if (drm_dp_enhanced_frame_cap(tc->link.dpcd))
+ tmp[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ ret = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, tmp, 2);
if (ret < 0)
goto err_dpcd_write;
@@ -1014,9 +1039,8 @@ static int tc_main_link_enable(struct tc_data *tc)
/* Enable DP0 to start Link Training */
ret = regmap_write(tc->regmap, DP0CTL,
- ((tc->link.base.capabilities &
- DP_LINK_CAP_ENHANCED_FRAMING) ? EF_EN : 0) |
- DP_EN);
+ (drm_dp_enhanced_frame_cap(tc->link.dpcd) ?
+ EF_EN : 0) | DP_EN);
if (ret)
return ret;
@@ -1095,7 +1119,7 @@ static int tc_main_link_enable(struct tc_data *tc)
ret = -ENODEV;
}
- if (tc->link.base.num_lanes == 2) {
+ if (tc->link.num_lanes == 2) {
value = (tmp[0] >> 4) & DP_CHANNEL_EQ_BITS;
if (value != DP_CHANNEL_EQ_BITS) {
@@ -1166,7 +1190,7 @@ static int tc_stream_enable(struct tc_data *tc)
return ret;
value = VID_MN_GEN | DP_EN;
- if (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ if (drm_dp_enhanced_frame_cap(tc->link.dpcd))
value |= EF_EN;
ret = regmap_write(tc->regmap, DP0CTL, value);
if (ret)
@@ -1292,7 +1316,7 @@ static enum drm_mode_status tc_mode_valid(struct drm_bridge *bridge,
return MODE_CLOCK_HIGH;
req = mode->clock * bits_per_pixel / 8;
- avail = tc->link.base.num_lanes * tc->link.base.rate;
+ avail = tc->link.num_lanes * tc->link.rate;
if (req > avail)
return MODE_BAD;
diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/cirrus/cirrus.c
index 89d9e6fdeb8c..248c9f765c45 100644
--- a/drivers/gpu/drm/cirrus/cirrus.c
+++ b/drivers/gpu/drm/cirrus/cirrus.c
@@ -390,7 +390,7 @@ static int cirrus_conn_init(struct cirrus_device *cirrus)
/* ------------------------------------------------------------------ */
/* cirrus (simple) display pipe */
-static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_crtc *crtc,
+static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode)
{
if (cirrus_check_size(mode->hdisplay, mode->vdisplay, NULL) < 0)
@@ -510,7 +510,7 @@ static void cirrus_mode_config_init(struct cirrus_device *cirrus)
/* ------------------------------------------------------------------ */
-DEFINE_DRM_GEM_SHMEM_FOPS(cirrus_fops);
+DEFINE_DRM_GEM_FOPS(cirrus_fops);
static struct drm_driver cirrus_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
deleted file mode 100644
index 1f73916e528e..000000000000
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ /dev/null
@@ -1,247 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2012 Red Hat
- *
- * Authors: Matthew Garrett
- * Dave Airlie
- */
-#ifndef __CIRRUS_DRV_H__
-#define __CIRRUS_DRV_H__
-
-#include <video/vga.h>
-
-#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
-
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_memory.h>
-#include <drm/ttm/ttm_module.h>
-
-#include <drm/drm_gem.h>
-
-#define DRIVER_AUTHOR "Matthew Garrett"
-
-#define DRIVER_NAME "cirrus"
-#define DRIVER_DESC "qemu Cirrus emulation"
-#define DRIVER_DATE "20110418"
-
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 0
-
-#define CIRRUSFB_CONN_LIMIT 1
-
-#define RREG8(reg) ioread8(((void __iomem *)cdev->rmmio) + (reg))
-#define WREG8(reg, v) iowrite8(v, ((void __iomem *)cdev->rmmio) + (reg))
-#define RREG32(reg) ioread32(((void __iomem *)cdev->rmmio) + (reg))
-#define WREG32(reg, v) iowrite32(v, ((void __iomem *)cdev->rmmio) + (reg))
-
-#define SEQ_INDEX 4
-#define SEQ_DATA 5
-
-#define WREG_SEQ(reg, v) \
- do { \
- WREG8(SEQ_INDEX, reg); \
- WREG8(SEQ_DATA, v); \
- } while (0) \
-
-#define CRT_INDEX 0x14
-#define CRT_DATA 0x15
-
-#define WREG_CRT(reg, v) \
- do { \
- WREG8(CRT_INDEX, reg); \
- WREG8(CRT_DATA, v); \
- } while (0) \
-
-#define GFX_INDEX 0xe
-#define GFX_DATA 0xf
-
-#define WREG_GFX(reg, v) \
- do { \
- WREG8(GFX_INDEX, reg); \
- WREG8(GFX_DATA, v); \
- } while (0) \
-
-/*
- * Cirrus has a "hidden" DAC register that can be accessed by writing to
- * the pixel mask register to reset the state, then reading from the register
- * four times. The next write will then pass to the DAC
- */
-#define VGA_DAC_MASK 0x6
-
-#define WREG_HDR(v) \
- do { \
- RREG8(VGA_DAC_MASK); \
- RREG8(VGA_DAC_MASK); \
- RREG8(VGA_DAC_MASK); \
- RREG8(VGA_DAC_MASK); \
- WREG8(VGA_DAC_MASK, v); \
- } while (0) \
-
-
-#define CIRRUS_MAX_FB_HEIGHT 4096
-#define CIRRUS_MAX_FB_WIDTH 4096
-
-#define CIRRUS_DPMS_CLEARED (-1)
-
-#define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base)
-#define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base)
-
-struct cirrus_crtc {
- struct drm_crtc base;
- int last_dpms;
- bool enabled;
-};
-
-struct cirrus_fbdev;
-struct cirrus_mode_info {
- struct cirrus_crtc *crtc;
- /* pointer to fbdev info structure */
- struct cirrus_fbdev *gfbdev;
-};
-
-struct cirrus_encoder {
- struct drm_encoder base;
- int last_dpms;
-};
-
-struct cirrus_connector {
- struct drm_connector base;
-};
-
-struct cirrus_mc {
- resource_size_t vram_size;
- resource_size_t vram_base;
-};
-
-struct cirrus_device {
- struct drm_device *dev;
- unsigned long flags;
-
- resource_size_t rmmio_base;
- resource_size_t rmmio_size;
- void __iomem *rmmio;
-
- struct cirrus_mc mc;
- struct cirrus_mode_info mode_info;
-
- int num_crtc;
- int fb_mtrr;
-
- struct {
- struct ttm_bo_device bdev;
- } ttm;
- bool mm_inited;
-};
-
-
-struct cirrus_fbdev {
- struct drm_fb_helper helper; /* must be first */
- struct drm_framebuffer *gfb;
- void *sysram;
- int size;
- int x1, y1, x2, y2; /* dirty rect */
- spinlock_t dirty_lock;
-};
-
-struct cirrus_bo {
- struct ttm_buffer_object bo;
- struct ttm_placement placement;
- struct ttm_bo_kmap_obj kmap;
- struct drm_gem_object gem;
- struct ttm_place placements[3];
- int pin_count;
-};
-#define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem)
-
-static inline struct cirrus_bo *
-cirrus_bo(struct ttm_buffer_object *bo)
-{
- return container_of(bo, struct cirrus_bo, bo);
-}
-
-
-#define to_cirrus_obj(x) container_of(x, struct cirrus_gem_object, base)
-
- /* cirrus_main.c */
-int cirrus_device_init(struct cirrus_device *cdev,
- struct drm_device *ddev,
- struct pci_dev *pdev,
- uint32_t flags);
-void cirrus_device_fini(struct cirrus_device *cdev);
-void cirrus_gem_free_object(struct drm_gem_object *obj);
-int cirrus_dumb_mmap_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle,
- uint64_t *offset);
-int cirrus_gem_create(struct drm_device *dev,
- u32 size, bool iskernel,
- struct drm_gem_object **obj);
-int cirrus_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
-
-int cirrus_framebuffer_init(struct drm_device *dev,
- struct drm_framebuffer *gfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
-
-bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
- int bpp, int pitch);
-
- /* cirrus_display.c */
-int cirrus_modeset_init(struct cirrus_device *cdev);
-void cirrus_modeset_fini(struct cirrus_device *cdev);
-
- /* cirrus_fbdev.c */
-int cirrus_fbdev_init(struct cirrus_device *cdev);
-void cirrus_fbdev_fini(struct cirrus_device *cdev);
-
-
-
- /* cirrus_irq.c */
-void cirrus_driver_irq_preinstall(struct drm_device *dev);
-int cirrus_driver_irq_postinstall(struct drm_device *dev);
-void cirrus_driver_irq_uninstall(struct drm_device *dev);
-irqreturn_t cirrus_driver_irq_handler(int irq, void *arg);
-
- /* cirrus_kms.c */
-int cirrus_driver_load(struct drm_device *dev, unsigned long flags);
-void cirrus_driver_unload(struct drm_device *dev);
-extern struct drm_ioctl_desc cirrus_ioctls[];
-extern int cirrus_max_ioctl;
-
-int cirrus_mm_init(struct cirrus_device *cirrus);
-void cirrus_mm_fini(struct cirrus_device *cirrus);
-void cirrus_ttm_placement(struct cirrus_bo *bo, int domain);
-int cirrus_bo_create(struct drm_device *dev, int size, int align,
- uint32_t flags, struct cirrus_bo **pcirrusbo);
-int cirrus_mmap(struct file *filp, struct vm_area_struct *vma);
-
-static inline int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
-{
- int ret;
-
- ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
- if (ret) {
- if (ret != -ERESTARTSYS && ret != -EBUSY)
- DRM_ERROR("reserve failed %p\n", bo);
- return ret;
- }
- return 0;
-}
-
-static inline void cirrus_bo_unreserve(struct cirrus_bo *bo)
-{
- ttm_bo_unreserve(&bo->bo);
-}
-
-int cirrus_bo_push_sysram(struct cirrus_bo *bo);
-int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
-
-extern int cirrus_bpp;
-
-#endif /* __CIRRUS_DRV_H__ */
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index d02709dd2d4a..121481f6aa71 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -132,10 +132,10 @@
* planes. Without this property the primary plane is always below the cursor
* plane, and ordering between all other planes is undefined. The positive
* Z axis points towards the user, i.e. planes with lower Z position values
- * are underneath planes with higher Z position values. Note that the Z
- * position value can also be immutable, to inform userspace about the
- * hard-coded stacking of overlay planes, see
- * drm_plane_create_zpos_immutable_property().
+ * are underneath planes with higher Z position values. Two planes with the
+ * same Z position value have undefined ordering. Note that the Z position
+ * value can also be immutable, to inform userspace about the hard-coded
+ * stacking of planes, see drm_plane_create_zpos_immutable_property().
*
* pixel blend mode:
* Pixel blend mode is set up with drm_plane_create_blend_mode_property().
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 3bd76e918b5d..03e01b000f7a 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -62,10 +62,10 @@ static void drm_cache_flush_clflush(struct page *pages[],
{
unsigned long i;
- mb();
+	mb(); /* Full memory barrier used before so that CLFLUSH is ordered */
for (i = 0; i < num_pages; i++)
drm_clflush_page(*pages++);
- mb();
+	mb(); /* Also used after CLFLUSH so that all cache lines are flushed */
}
#endif
@@ -92,6 +92,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
#elif defined(__powerpc__)
unsigned long i;
+
for (i = 0; i < num_pages; i++) {
struct page *page = pages[i];
void *page_virtual;
@@ -125,10 +126,10 @@ drm_clflush_sg(struct sg_table *st)
if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
struct sg_page_iter sg_iter;
- mb();
+		mb(); /* CLFLUSH is ordered only by using memory barriers */
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
drm_clflush_page(sg_page_iter_page(&sg_iter));
- mb();
+		mb(); /* Make sure that every cache line entry is flushed */
return;
}
@@ -157,12 +158,13 @@ drm_clflush_virt_range(void *addr, unsigned long length)
if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
const int size = boot_cpu_data.x86_clflush_size;
void *end = addr + length;
+
addr = (void *)(((unsigned long)addr) & -size);
- mb();
+ mb(); /* CLFLUSH is only ordered with a full memory barrier */
for (; addr < end; addr += size)
clflushopt(addr);
clflushopt(end - 1); /* force serialisation */
- mb();
+ mb(); /* Ensure that every data cache line entry is flushed */
return;
}
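
The barriers annotated above are what make drm_clflush_virt_range() safe to call without any caller-side fencing. A minimal usage sketch, assuming a CPU-visible mapping the device will read next:

static void example_flush_cpu_writes(void *vaddr, unsigned long size)
{
	memset(vaddr, 0xa5, size);		/* CPU writes land in cache */
	drm_clflush_virt_range(vaddr, size);	/* barriers + CLFLUSH push them out */
}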
diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c
index b457c16c3a8b..3ab2609f9ec7 100644
--- a/drivers/gpu/drm/drm_dp_cec.c
+++ b/drivers/gpu/drm/drm_dp_cec.c
@@ -8,10 +8,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+
+#include <media/cec.h>
+
#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
#include <drm/drm_dp_helper.h>
-#include <drm/drmP.h>
-#include <media/cec.h>
/*
* Unfortunately it turns out that we have a chicken-and-egg situation
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index f373798d82f6..2c7870aef469 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -120,33 +120,49 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
-void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
- int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
- DP_TRAINING_AUX_RD_MASK;
+u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
+ unsigned int lane)
+{
+ unsigned int offset = DP_ADJUST_REQUEST_POST_CURSOR2;
+ u8 value = dp_link_status(link_status, offset);
+
+ return (value >> (lane << 1)) & 0x3;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_post_cursor);
+
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_TRAINING_AUX_RD_MASK;
if (rd_interval > 4)
- DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n",
+ DRM_DEBUG_KMS("AUX interval %lu, out of range (max 4)\n",
rd_interval);
if (rd_interval == 0 || dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
- udelay(100);
+ rd_interval = 100;
else
- mdelay(rd_interval * 4);
+ rd_interval *= 4 * USEC_PER_MSEC;
+
+ usleep_range(rd_interval, rd_interval * 2);
}
EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
-void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
- int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
- DP_TRAINING_AUX_RD_MASK;
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_TRAINING_AUX_RD_MASK;
if (rd_interval > 4)
- DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n",
+ DRM_DEBUG_KMS("AUX interval %lu, out of range (max 4)\n",
rd_interval);
if (rd_interval == 0)
- udelay(400);
+ rd_interval = 400;
else
- mdelay(rd_interval * 4);
+ rd_interval *= 4 * USEC_PER_MSEC;
+
+ usleep_range(rd_interval, rd_interval * 2);
}
EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
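
Both conversions above implement the same DPCD rule while replacing the busy-wait mdelay() with a sleeping usleep_range(). Restated as a standalone helper (illustrative name, clock-recovery variant):

static unsigned long example_cr_delay_us(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
				    DP_TRAINING_AUX_RD_MASK;

	/* 0, or any DPCD >= 1.4 device, means the minimum of 100 us */
	if (rd_interval == 0 || dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
		return 100;

	/* otherwise the field is in units of 4 ms */
	return rd_interval * 4 * USEC_PER_MSEC;
}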
@@ -220,7 +236,6 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
}
ret = aux->transfer(aux, &msg);
-
if (ret >= 0) {
native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK;
if (native_reply == DP_AUX_NATIVE_REPLY_ACK) {
@@ -337,134 +352,6 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
/**
- * drm_dp_link_probe() - probe a DisplayPort link for capabilities
- * @aux: DisplayPort AUX channel
- * @link: pointer to structure in which to return link capabilities
- *
- * The structure filled in by this function can usually be passed directly
- * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and
- * configure the link based on the link's capabilities.
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 values[3];
- int err;
-
- memset(link, 0, sizeof(*link));
-
- err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values));
- if (err < 0)
- return err;
-
- link->revision = values[0];
- link->rate = drm_dp_bw_code_to_link_rate(values[1]);
- link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK;
-
- if (values[2] & DP_ENHANCED_FRAME_CAP)
- link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dp_link_probe);
-
-/**
- * drm_dp_link_power_up() - power up a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must exit the
- * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
- * Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dp_link_power_up);
-
-/**
- * drm_dp_link_power_down() - power down a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D3;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dp_link_power_down);
-
-/**
- * drm_dp_link_configure() - configure a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 values[2];
- int err;
-
- values[0] = drm_dp_link_rate_to_bw_code(link->rate);
- values[1] = link->num_lanes;
-
- if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
- values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-
- err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
- if (err < 0)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dp_link_configure);
-
-/**
* drm_dp_downstream_max_clock() - extract branch device max
* pixel rate for legacy VGA
* converter or max TMDS clock
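
With drm_dp_link_probe()/power_up()/power_down()/configure() removed, drivers that relied on them talk to the DPCD directly. A minimal sketch of the power-up sequence the deleted helper performed, assuming the caller already knows the sink's DPCD revision:

static int example_dp_power_up(struct drm_dp_aux *aux, u8 dpcd_rev)
{
	u8 value;
	int err;

	/* DP_SET_POWER only exists on DPCD v1.1 and later */
	if (dpcd_rev < 0x11)
		return 0;

	err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
	if (err < 0)
		return err;

	value = (value & ~DP_SET_POWER_MASK) | DP_SET_POWER_D0;
	err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
	if (err < 0)
		return err;

	/* sinks must exit the power saving state within 1 ms */
	usleep_range(1000, 2000);
	return 0;
}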
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 6b14b63b8d62..85bef73a6763 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -28,6 +28,13 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+#include <linux/stackdepot.h>
+#include <linux/sort.h>
+#include <linux/timekeeping.h>
+#include <linux/math64.h>
+#endif
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
@@ -45,6 +52,12 @@
* protocol. The helpers contain a topology manager and bandwidth manager.
* The helpers encapsulate the sending and receiving of sideband msgs.
*/
+struct drm_dp_pending_up_req {
+ struct drm_dp_sideband_msg_hdr hdr;
+ struct drm_dp_sideband_msg_req_body msg;
+ struct list_head next;
+};
+
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
char *buf);
@@ -61,8 +74,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes);
-static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_branch *mstb);
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port);
@@ -1393,39 +1406,194 @@ drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
-static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+
+#define STACK_DEPTH 8
+
+static noinline void
+__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_ref_history *history,
+ enum drm_dp_mst_topology_ref_type type)
{
- struct drm_dp_mst_branch *mstb =
- container_of(kref, struct drm_dp_mst_branch, topology_kref);
- struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
- struct drm_dp_mst_port *port, *tmp;
- bool wake_tx = false;
+ struct drm_dp_mst_topology_ref_entry *entry = NULL;
+ depot_stack_handle_t backtrace;
+ ulong stack_entries[STACK_DEPTH];
+ uint n;
+ int i;
- mutex_lock(&mgr->lock);
- list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
- list_del(&port->next);
- drm_dp_mst_topology_put_port(port);
+ n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
+ backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
+ if (!backtrace)
+ return;
+
+ /* Try to find an existing entry for this backtrace */
+ for (i = 0; i < history->len; i++) {
+ if (history->entries[i].backtrace == backtrace) {
+ entry = &history->entries[i];
+ break;
+ }
}
- mutex_unlock(&mgr->lock);
- /* drop any tx slots msg */
- mutex_lock(&mstb->mgr->qlock);
- if (mstb->tx_slots[0]) {
- mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
- mstb->tx_slots[0] = NULL;
- wake_tx = true;
+ /* Otherwise add one */
+ if (!entry) {
+ struct drm_dp_mst_topology_ref_entry *new;
+ int new_len = history->len + 1;
+
+ new = krealloc(history->entries, sizeof(*new) * new_len,
+ GFP_KERNEL);
+ if (!new)
+ return;
+
+ entry = &new[history->len];
+ history->len = new_len;
+ history->entries = new;
+
+ entry->backtrace = backtrace;
+ entry->type = type;
+ entry->count = 0;
}
- if (mstb->tx_slots[1]) {
- mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
- mstb->tx_slots[1] = NULL;
- wake_tx = true;
+ entry->count++;
+ entry->ts_nsec = ktime_get_ns();
+}
+
+static int
+topology_ref_history_cmp(const void *a, const void *b)
+{
+ const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
+
+ if (entry_a->ts_nsec > entry_b->ts_nsec)
+ return 1;
+ else if (entry_a->ts_nsec < entry_b->ts_nsec)
+ return -1;
+ else
+ return 0;
+}
+
+static inline const char *
+topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
+{
+ if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
+ return "get";
+ else
+ return "put";
+}
+
+static void
+__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
+ void *ptr, const char *type_str)
+{
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ int i;
+
+ if (!buf)
+ return;
+
+ if (!history->len)
+ goto out;
+
+ /* First, sort the list so that it goes from oldest to newest
+ * reference entry
+ */
+ sort(history->entries, history->len, sizeof(*history->entries),
+ topology_ref_history_cmp, NULL);
+
+ drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
+ type_str, ptr);
+
+ for (i = 0; i < history->len; i++) {
+ const struct drm_dp_mst_topology_ref_entry *entry =
+ &history->entries[i];
+ ulong *entries;
+ uint nr_entries;
+ u64 ts_nsec = entry->ts_nsec;
+ u64 rem_nsec = do_div(ts_nsec, 1000000000);
+
+ nr_entries = stack_depot_fetch(entry->backtrace, &entries);
+ stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);
+
+ drm_printf(&p, " %d %ss (last at %5llu.%06llu):\n%s",
+ entry->count,
+ topology_ref_type_to_str(entry->type),
+ ts_nsec, rem_nsec / 1000, buf);
}
- mutex_unlock(&mstb->mgr->qlock);
- if (wake_tx)
- wake_up_all(&mstb->mgr->tx_waitq);
+ /* Now free the history, since this is the only time we expose it */
+ kfree(history->entries);
+out:
+ kfree(buf);
+}
- drm_dp_mst_put_mstb_malloc(mstb);
+static __always_inline void
+drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
+{
+ __dump_topology_ref_history(&mstb->topology_ref_history, mstb,
+ "MSTB");
+}
+
+static __always_inline void
+drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
+{
+ __dump_topology_ref_history(&port->topology_ref_history, port,
+ "Port");
+}
+
+static __always_inline void
+save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
+ enum drm_dp_mst_topology_ref_type type)
+{
+ __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
+}
+
+static __always_inline void
+save_port_topology_ref(struct drm_dp_mst_port *port,
+ enum drm_dp_mst_topology_ref_type type)
+{
+ __topology_ref_save(port->mgr, &port->topology_ref_history, type);
+}
+
+static inline void
+topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_lock(&mgr->topology_ref_history_lock);
+}
+
+static inline void
+topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_unlock(&mgr->topology_ref_history_lock);
+}
+#else
+static inline void
+topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
+static inline void
+topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
+static inline void
+drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
+static inline void
+drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
+#define save_mstb_topology_ref(mstb, type)
+#define save_port_topology_ref(port, type)
+#endif
+
+static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+{
+ struct drm_dp_mst_branch *mstb =
+ container_of(kref, struct drm_dp_mst_branch, topology_kref);
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
+
+ drm_dp_mst_dump_mstb_topology_history(mstb);
+
+ INIT_LIST_HEAD(&mstb->destroy_next);
+
+ /*
+ * This can get called under mgr->mutex, so we need to perform the
+ * actual destruction of the mstb in another worker
+ */
+ mutex_lock(&mgr->delayed_destroy_lock);
+ list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
+ mutex_unlock(&mgr->delayed_destroy_lock);
+ schedule_work(&mgr->delayed_destroy_work);
}
/**
@@ -1453,11 +1621,17 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
- int ret = kref_get_unless_zero(&mstb->topology_kref);
+ int ret;
- if (ret)
- DRM_DEBUG("mstb %p (%d)\n", mstb,
- kref_read(&mstb->topology_kref));
+ topology_ref_history_lock(mstb->mgr);
+ ret = kref_get_unless_zero(&mstb->topology_kref);
+ if (ret) {
+ DRM_DEBUG("mstb %p (%d)\n",
+ mstb, kref_read(&mstb->topology_kref));
+ save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
+ }
+
+ topology_ref_history_unlock(mstb->mgr);
return ret;
}
@@ -1478,9 +1652,14 @@ drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
*/
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
+ topology_ref_history_lock(mstb->mgr);
+
+ save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
WARN_ON(kref_read(&mstb->topology_kref) == 0);
kref_get(&mstb->topology_kref);
DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
+
+ topology_ref_history_unlock(mstb->mgr);
}
/**
@@ -1498,27 +1677,14 @@ static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
+ topology_ref_history_lock(mstb->mgr);
+
DRM_DEBUG("mstb %p (%d)\n",
mstb, kref_read(&mstb->topology_kref) - 1);
- kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
-}
-
-static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
-{
- struct drm_dp_mst_branch *mstb;
+ save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
- switch (old_pdt) {
- case DP_PEER_DEVICE_DP_LEGACY_CONV:
- case DP_PEER_DEVICE_SST_SINK:
- /* remove i2c over sideband */
- drm_dp_mst_unregister_i2c_bus(&port->aux);
- break;
- case DP_PEER_DEVICE_MST_BRANCHING:
- mstb = port->mstb;
- port->mstb = NULL;
- drm_dp_mst_topology_put_mstb(mstb);
- break;
- }
+ topology_ref_history_unlock(mstb->mgr);
+ kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}
static void drm_dp_destroy_port(struct kref *kref)
@@ -1527,31 +1693,24 @@ static void drm_dp_destroy_port(struct kref *kref)
container_of(kref, struct drm_dp_mst_port, topology_kref);
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
- if (!port->input) {
- kfree(port->cached_edid);
+ drm_dp_mst_dump_port_topology_history(port);
- /*
- * The only time we don't have a connector
- * on an output port is if the connector init
- * fails.
- */
- if (port->connector) {
- /* we can't destroy the connector here, as
- * we might be holding the mode_config.mutex
- * from an EDID retrieval */
-
- mutex_lock(&mgr->destroy_connector_lock);
- list_add(&port->next, &mgr->destroy_connector_list);
- mutex_unlock(&mgr->destroy_connector_lock);
- schedule_work(&mgr->destroy_connector_work);
- return;
- }
- /* no need to clean up vcpi
- * as if we have no connector we never setup a vcpi */
- drm_dp_port_teardown_pdt(port, port->pdt);
- port->pdt = DP_PEER_DEVICE_NONE;
+ /* There's nothing that needs locking to destroy an input port yet */
+ if (port->input) {
+ drm_dp_mst_put_port_malloc(port);
+ return;
}
- drm_dp_mst_put_port_malloc(port);
+
+ kfree(port->cached_edid);
+
+ /*
+ * we can't destroy the connector here, as we might be holding the
+ * mode_config.mutex from an EDID retrieval
+ */
+ mutex_lock(&mgr->delayed_destroy_lock);
+ list_add(&port->next, &mgr->destroy_port_list);
+ mutex_unlock(&mgr->delayed_destroy_lock);
+ schedule_work(&mgr->delayed_destroy_work);
}
/**
@@ -1579,12 +1738,17 @@ static void drm_dp_destroy_port(struct kref *kref)
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
- int ret = kref_get_unless_zero(&port->topology_kref);
+ int ret;
- if (ret)
- DRM_DEBUG("port %p (%d)\n", port,
- kref_read(&port->topology_kref));
+ topology_ref_history_lock(port->mgr);
+ ret = kref_get_unless_zero(&port->topology_kref);
+ if (ret) {
+ DRM_DEBUG("port %p (%d)\n",
+ port, kref_read(&port->topology_kref));
+ save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
+ }
+ topology_ref_history_unlock(port->mgr);
return ret;
}
@@ -1603,9 +1767,14 @@ drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
*/
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
+ topology_ref_history_lock(port->mgr);
+
WARN_ON(kref_read(&port->topology_kref) == 0);
kref_get(&port->topology_kref);
DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
+ save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
+
+ topology_ref_history_unlock(port->mgr);
}
/**
@@ -1621,8 +1790,13 @@ static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
*/
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
+ topology_ref_history_lock(port->mgr);
+
DRM_DEBUG("port %p (%d)\n",
port, kref_read(&port->topology_kref) - 1);
+ save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
+
+ topology_ref_history_unlock(port->mgr);
kref_put(&port->topology_kref, drm_dp_destroy_port);
}
@@ -1739,38 +1913,79 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
return parent_lct + 1;
}
-/*
- * return sends link address for new mstb
- */
-static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
+static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt)
{
- int ret;
- u8 rad[6], lct;
- bool send_link = false;
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+ struct drm_dp_mst_branch *mstb;
+ u8 rad[8], lct;
+ int ret = 0;
+
+ if (port->pdt == new_pdt)
+ return 0;
+
+ /* Teardown the old pdt, if there is one */
+ switch (port->pdt) {
+ case DP_PEER_DEVICE_DP_LEGACY_CONV:
+ case DP_PEER_DEVICE_SST_SINK:
+ /*
+ * If the new PDT would also have an i2c bus, don't bother
+ * with reregistering it
+ */
+ if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
+ new_pdt == DP_PEER_DEVICE_SST_SINK) {
+ port->pdt = new_pdt;
+ return 0;
+ }
+
+ /* remove i2c over sideband */
+ drm_dp_mst_unregister_i2c_bus(&port->aux);
+ break;
+ case DP_PEER_DEVICE_MST_BRANCHING:
+ mutex_lock(&mgr->lock);
+ drm_dp_mst_topology_put_mstb(port->mstb);
+ port->mstb = NULL;
+ mutex_unlock(&mgr->lock);
+ break;
+ }
+
+ port->pdt = new_pdt;
switch (port->pdt) {
case DP_PEER_DEVICE_DP_LEGACY_CONV:
case DP_PEER_DEVICE_SST_SINK:
/* add i2c over sideband */
ret = drm_dp_mst_register_i2c_bus(&port->aux);
break;
+
case DP_PEER_DEVICE_MST_BRANCHING:
lct = drm_dp_calculate_rad(port, rad);
+ mstb = drm_dp_add_mst_branch_device(lct, rad);
+ if (!mstb) {
+ ret = -ENOMEM;
+ DRM_ERROR("Failed to create MSTB for port %p", port);
+ goto out;
+ }
- port->mstb = drm_dp_add_mst_branch_device(lct, rad);
- if (port->mstb) {
- port->mstb->mgr = port->mgr;
- port->mstb->port_parent = port;
- /*
- * Make sure this port's memory allocation stays
- * around until its child MSTB releases it
- */
- drm_dp_mst_get_port_malloc(port);
+ mutex_lock(&mgr->lock);
+ port->mstb = mstb;
+ mstb->mgr = port->mgr;
+ mstb->port_parent = port;
- send_link = true;
- }
+ /*
+ * Make sure this port's memory allocation stays
+ * around until its child MSTB releases it
+ */
+ drm_dp_mst_get_port_malloc(port);
+ mutex_unlock(&mgr->lock);
+
+ /* And make sure we send a link address for this */
+ ret = 1;
break;
}
- return send_link;
+
+out:
+ if (ret < 0)
+ port->pdt = DP_PEER_DEVICE_NONE;
+ return ret;
}
/**
@@ -1903,44 +2118,130 @@ void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
static void
+drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *port)
+{
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+ char proppath[255];
+ int ret;
+
+ build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
+ port->connector = mgr->cbs->add_connector(mgr, port, proppath);
+ if (!port->connector) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
+ port->pdt == DP_PEER_DEVICE_SST_SINK) &&
+ port->port_num >= DP_MST_LOGICAL_PORT_0) {
+ port->cached_edid = drm_get_edid(port->connector,
+ &port->aux.ddc);
+ drm_connector_set_tile_property(port->connector);
+ }
+
+ mgr->cbs->register_connector(port->connector);
+ return;
+
+error:
+ DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
+}
+
+/*
+ * Drop a topology reference, and unlink the port from the in-memory topology
+ * layout
+ */
+static void
+drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port)
+{
+ mutex_lock(&mgr->lock);
+ list_del(&port->next);
+ mutex_unlock(&mgr->lock);
+ drm_dp_mst_topology_put_port(port);
+}
+
+static struct drm_dp_mst_port *
+drm_dp_mst_add_port(struct drm_device *dev,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb, u8 port_number)
+{
+ struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
+
+ if (!port)
+ return NULL;
+
+ kref_init(&port->topology_kref);
+ kref_init(&port->malloc_kref);
+ port->parent = mstb;
+ port->port_num = port_number;
+ port->mgr = mgr;
+ port->aux.name = "DPMST";
+ port->aux.dev = dev->dev;
+ port->aux.is_remote = true;
+
+ /*
+ * Make sure the memory allocation for our parent branch stays
+ * around until our own memory allocation is released
+ */
+ drm_dp_mst_get_mstb_malloc(mstb);
+
+ return port;
+}
+
+static int
drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
struct drm_device *dev,
struct drm_dp_link_addr_reply_port *port_msg)
{
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
- bool ret;
- bool created = false;
- int old_pdt = 0;
- int old_ddps = 0;
+ int old_ddps = 0, ret;
+ u8 new_pdt = DP_PEER_DEVICE_NONE;
+ bool created = false, send_link_addr = false, changed = false;
port = drm_dp_get_port(mstb, port_msg->port_number);
if (!port) {
- port = kzalloc(sizeof(*port), GFP_KERNEL);
+ port = drm_dp_mst_add_port(dev, mgr, mstb,
+ port_msg->port_number);
if (!port)
- return;
- kref_init(&port->topology_kref);
- kref_init(&port->malloc_kref);
- port->parent = mstb;
- port->port_num = port_msg->port_number;
- port->mgr = mstb->mgr;
- port->aux.name = "DPMST";
- port->aux.dev = dev->dev;
- port->aux.is_remote = true;
-
- /*
- * Make sure the memory allocation for our parent branch stays
- * around until our own memory allocation is released
+ return -ENOMEM;
+ created = true;
+ changed = true;
+ } else if (!port->input && port_msg->input_port && port->connector) {
+ /* Since port->connector can't be changed here, we create a
+ * new port if input_port changes from 0 to 1
*/
- drm_dp_mst_get_mstb_malloc(mstb);
-
+ drm_dp_mst_topology_unlink_port(mgr, port);
+ drm_dp_mst_topology_put_port(port);
+ port = drm_dp_mst_add_port(dev, mgr, mstb,
+ port_msg->port_number);
+ if (!port)
+ return -ENOMEM;
+ changed = true;
created = true;
- } else {
- old_pdt = port->pdt;
+ } else if (port->input && !port_msg->input_port) {
+ changed = true;
+ } else if (port->connector) {
+ /* We're updating a port that's exposed to userspace, so do it
+ * under lock
+ */
+ drm_modeset_lock(&mgr->base.lock, NULL);
+
old_ddps = port->ddps;
+ changed = port->ddps != port_msg->ddps ||
+ (port->ddps &&
+ (port->ldps != port_msg->legacy_device_plug_status ||
+ port->dpcd_rev != port_msg->dpcd_revision ||
+ port->mcs != port_msg->mcs ||
+ port->pdt != port_msg->peer_device_type ||
+ port->num_sdp_stream_sinks !=
+ port_msg->num_sdp_stream_sinks));
}
- port->pdt = port_msg->peer_device_type;
port->input = port_msg->input_port;
+ if (!port->input)
+ new_pdt = port_msg->peer_device_type;
port->mcs = port_msg->mcs;
port->ddps = port_msg->ddps;
port->ldps = port_msg->legacy_device_plug_status;
@@ -1951,78 +2252,104 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
/* manage mstb port lists with mgr lock - take a reference
for this list */
if (created) {
- mutex_lock(&mstb->mgr->lock);
+ mutex_lock(&mgr->lock);
drm_dp_mst_topology_get_port(port);
list_add(&port->next, &mstb->ports);
- mutex_unlock(&mstb->mgr->lock);
+ mutex_unlock(&mgr->lock);
}
if (old_ddps != port->ddps) {
if (port->ddps) {
if (!port->input) {
- drm_dp_send_enum_path_resources(mstb->mgr,
- mstb, port);
+ drm_dp_send_enum_path_resources(mgr, mstb,
+ port);
}
} else {
port->available_pbn = 0;
}
}
- if (old_pdt != port->pdt && !port->input) {
- drm_dp_port_teardown_pdt(port, old_pdt);
-
- ret = drm_dp_port_setup_pdt(port);
- if (ret == true)
- drm_dp_send_link_address(mstb->mgr, port->mstb);
+ ret = drm_dp_port_set_pdt(port, new_pdt);
+ if (ret == 1) {
+ send_link_addr = true;
+ } else if (ret < 0) {
+ DRM_ERROR("Failed to change PDT on port %p: %d\n",
+ port, ret);
+ goto fail;
}
- if (created && !port->input) {
- char proppath[255];
-
- build_mst_prop_path(mstb, port->port_num, proppath,
- sizeof(proppath));
- port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr,
- port,
- proppath);
- if (!port->connector) {
- /* remove it from the port list */
- mutex_lock(&mstb->mgr->lock);
- list_del(&port->next);
- mutex_unlock(&mstb->mgr->lock);
- /* drop port list reference */
- drm_dp_mst_topology_put_port(port);
- goto out;
- }
- if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
- port->pdt == DP_PEER_DEVICE_SST_SINK) &&
- port->port_num >= DP_MST_LOGICAL_PORT_0) {
- port->cached_edid = drm_get_edid(port->connector,
- &port->aux.ddc);
- drm_connector_set_tile_property(port->connector);
- }
- (*mstb->mgr->cbs->register_connector)(port->connector);
+ /*
+ * If this port wasn't just created, then we're reprobing because
+ * we're coming out of suspend. In this case, always resend the link
+ * address if there's an MSTB on this port
+ */
+ if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING)
+ send_link_addr = true;
+
+ if (port->connector)
+ drm_modeset_unlock(&mgr->base.lock);
+ else if (!port->input)
+ drm_dp_mst_port_add_connector(mstb, port);
+
+ if (send_link_addr && port->mstb) {
+ ret = drm_dp_send_link_address(mgr, port->mstb);
+ if (ret == 1) /* MSTB below us changed */
+ changed = true;
+ else if (ret < 0)
+ goto fail_put;
}
-out:
/* put reference to this port */
drm_dp_mst_topology_put_port(port);
+ return changed;
+
+fail:
+ drm_dp_mst_topology_unlink_port(mgr, port);
+ if (port->connector)
+ drm_modeset_unlock(&mgr->base.lock);
+fail_put:
+ drm_dp_mst_topology_put_port(port);
+ return ret;
}
static void
drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
struct drm_dp_connection_status_notify *conn_stat)
{
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
- int old_pdt;
- int old_ddps;
- bool dowork = false;
+ int old_ddps, ret;
+ u8 new_pdt;
+ bool dowork = false, create_connector = false;
+
port = drm_dp_get_port(mstb, conn_stat->port_number);
if (!port)
return;
+ if (port->connector) {
+ if (!port->input && conn_stat->input_port) {
+ /*
+ * We can't remove a connector from an already exposed
+ * port, so just throw the port out and make sure we
+ * reprobe the link address of its parent MSTB
+ */
+ drm_dp_mst_topology_unlink_port(mgr, port);
+ mstb->link_address_sent = false;
+ dowork = true;
+ goto out;
+ }
+
+ /* Locking is only needed if the port's exposed to userspace */
+ drm_modeset_lock(&mgr->base.lock, NULL);
+ } else if (port->input && !conn_stat->input_port) {
+ create_connector = true;
+ /* Reprobe link address so we get num_sdp_streams */
+ mstb->link_address_sent = false;
+ dowork = true;
+ }
+
old_ddps = port->ddps;
- old_pdt = port->pdt;
- port->pdt = conn_stat->peer_device_type;
+ port->input = conn_stat->input_port;
port->mcs = conn_stat->message_capability_status;
port->ldps = conn_stat->legacy_device_plug_status;
port->ddps = conn_stat->displayport_device_plug_status;
@@ -2034,17 +2361,27 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
port->available_pbn = 0;
}
}
- if (old_pdt != port->pdt && !port->input) {
- drm_dp_port_teardown_pdt(port, old_pdt);
- if (drm_dp_port_setup_pdt(port))
- dowork = true;
+ new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
+
+ ret = drm_dp_port_set_pdt(port, new_pdt);
+ if (ret == 1) {
+ dowork = true;
+ } else if (ret < 0) {
+ DRM_ERROR("Failed to change PDT for port %p: %d\n",
+ port, ret);
+ dowork = false;
}
+ if (port->connector)
+ drm_modeset_unlock(&mgr->base.lock);
+ else if (create_connector)
+ drm_dp_mst_port_add_connector(mstb, port);
+
+out:
drm_dp_mst_topology_put_port(port);
if (dowork)
queue_work(system_long_wq, &mstb->mgr->work);
-
}
static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
@@ -2130,41 +2467,62 @@ drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
return mstb;
}
-static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_port *port;
- struct drm_dp_mst_branch *mstb_child;
- if (!mstb->link_address_sent)
- drm_dp_send_link_address(mgr, mstb);
+ int ret;
+ bool changed = false;
+
+ if (!mstb->link_address_sent) {
+ ret = drm_dp_send_link_address(mgr, mstb);
+ if (ret == 1)
+ changed = true;
+ else if (ret < 0)
+ return ret;
+ }
list_for_each_entry(port, &mstb->ports, next) {
- if (port->input)
- continue;
+ struct drm_dp_mst_branch *mstb_child = NULL;
- if (!port->ddps)
+ if (port->input || !port->ddps)
continue;
- if (!port->available_pbn)
+ if (!port->available_pbn) {
+ drm_modeset_lock(&mgr->base.lock, NULL);
drm_dp_send_enum_path_resources(mgr, mstb, port);
+ drm_modeset_unlock(&mgr->base.lock);
+ changed = true;
+ }
- if (port->mstb) {
+ if (port->mstb)
mstb_child = drm_dp_mst_topology_get_mstb_validated(
mgr, port->mstb);
- if (mstb_child) {
- drm_dp_check_and_send_link_address(mgr, mstb_child);
- drm_dp_mst_topology_put_mstb(mstb_child);
- }
+
+ if (mstb_child) {
+ ret = drm_dp_check_and_send_link_address(mgr,
+ mstb_child);
+ drm_dp_mst_topology_put_mstb(mstb_child);
+ if (ret == 1)
+ changed = true;
+ else if (ret < 0)
+ return ret;
}
}
+
+ return changed;
}
static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
- struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
+ struct drm_dp_mst_topology_mgr *mgr =
+ container_of(work, struct drm_dp_mst_topology_mgr, work);
+ struct drm_device *dev = mgr->dev;
struct drm_dp_mst_branch *mstb;
int ret;
+ mutex_lock(&mgr->probe_lock);
+
mutex_lock(&mgr->lock);
mstb = mgr->mst_primary;
if (mstb) {
@@ -2173,10 +2531,17 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
mstb = NULL;
}
mutex_unlock(&mgr->lock);
- if (mstb) {
- drm_dp_check_and_send_link_address(mgr, mstb);
- drm_dp_mst_topology_put_mstb(mstb);
+ if (!mstb) {
+ mutex_unlock(&mgr->probe_lock);
+ return;
}
+
+ ret = drm_dp_check_and_send_link_address(mgr, mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
+
+ mutex_unlock(&mgr->probe_lock);
+ if (ret)
+ drm_kms_helper_hotplug_event(dev);
}
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
@@ -2422,16 +2787,18 @@ drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
}
}
-static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_link_address_ack_reply *reply;
- int i, len, ret;
+ struct drm_dp_mst_port *port, *tmp;
+ int i, len, ret, port_mask = 0;
+ bool changed = false;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
- return;
+ return -ENOMEM;
txmsg->dst = mstb;
len = build_link_address(txmsg);
@@ -2457,16 +2824,39 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
drm_dp_check_mstb_guid(mstb, reply->guid);
- for (i = 0; i < reply->nports; i++)
- drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
- &reply->ports[i]);
+ for (i = 0; i < reply->nports; i++) {
+ port_mask |= BIT(reply->ports[i].port_number);
+ ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
+ &reply->ports[i]);
+ if (ret == 1)
+ changed = true;
+ else if (ret < 0)
+ goto out;
+ }
- drm_kms_helper_hotplug_event(mgr->dev);
+ /* Prune any ports that are currently a part of mstb in our in-memory
+ * topology, but were not seen in this link address. Usually this
+ * means that they were removed while the topology was out of sync,
+ * e.g. during suspend/resume
+ */
+ mutex_lock(&mgr->lock);
+ list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
+ if (port_mask & BIT(port->port_num))
+ continue;
+
+ DRM_DEBUG_KMS("port %d was not in link address, removing\n",
+ port->port_num);
+ list_del(&port->next);
+ drm_dp_mst_topology_put_port(port);
+ changed = true;
+ }
+ mutex_unlock(&mgr->lock);
out:
if (ret <= 0)
mstb->link_address_sent = false;
kfree(txmsg);
+ return ret < 0 ? ret : changed;
}
static int
@@ -3071,6 +3461,23 @@ out_unlock:
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
+static void
+drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_mst_port *port;
+
+ /* The link address will need to be re-sent on resume */
+ mstb->link_address_sent = false;
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ /* The PBN for each port will also need to be re-probed */
+ port->available_pbn = 0;
+
+ if (port->mstb)
+ drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
+ }
+}
+
/**
* drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
* @mgr: manager to suspend
@@ -3084,62 +3491,89 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UPSTREAM_IS_SRC);
mutex_unlock(&mgr->lock);
+ flush_work(&mgr->up_req_work);
flush_work(&mgr->work);
- flush_work(&mgr->destroy_connector_work);
+ flush_work(&mgr->delayed_destroy_work);
+
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_state && mgr->mst_primary)
+ drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
+ mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
/**
* drm_dp_mst_topology_mgr_resume() - resume the MST manager
* @mgr: manager to resume
+ * @sync: whether or not to perform topology reprobing synchronously
*
* This will fetch DPCD and see if the device is still there,
* if it is, it will rewrite the MSTM control bits, and return.
*
- * if the device fails this returns -1, and the driver should do
+ * If the device fails this returns -1, and the driver should do
* a full MST reprobe, in case we were undocked.
+ *
+ * During system resume (where it is assumed that the driver will be calling
+ * drm_atomic_helper_resume()), this function should be called beforehand with
+ * @sync set to true. In contexts like runtime resume where the driver is not
+ * expected to be calling drm_atomic_helper_resume(), this function should be
+ * called with @sync set to false in order to avoid deadlocking.
+ *
+ * Returns: -1 if the MST topology was removed while we were suspended, 0
+ * otherwise.
*/
-int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
+ bool sync)
{
- int ret = 0;
+ int ret;
+ u8 guid[16];
mutex_lock(&mgr->lock);
+ if (!mgr->mst_primary)
+ goto out_fail;
- if (mgr->mst_primary) {
- int sret;
- u8 guid[16];
+ ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
+ DP_RECEIVER_CAP_SIZE);
+ if (ret != DP_RECEIVER_CAP_SIZE) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
- sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
- if (sret != DP_RECEIVER_CAP_SIZE) {
- DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
- ret = -1;
- goto out_unlock;
- }
+ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
+ goto out_fail;
+ }
- ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
- if (ret < 0) {
- DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
- ret = -1;
- goto out_unlock;
- }
+ /* Some hubs forget their guids after they resume */
+ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+ if (ret != 16) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+ drm_dp_check_mstb_guid(mgr->mst_primary, guid);
- /* Some hubs forget their guids after they resume */
- sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
- if (sret != 16) {
- DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
- ret = -1;
- goto out_unlock;
- }
- drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+ /*
+ * For the final step of resuming the topology, we need to bring the
+ * state of our in-memory topology back into sync with reality. So,
+ * restart the probing process as if we're probing a new hub
+ */
+ queue_work(system_long_wq, &mgr->work);
+ mutex_unlock(&mgr->lock);
- ret = 0;
- } else
- ret = -1;
+ if (sync) {
+ DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
+ flush_work(&mgr->work);
+ }
-out_unlock:
+ return 0;
+
+out_fail:
mutex_unlock(&mgr->lock);
- return ret;
+ return -1;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
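
The new @sync parameter encodes the calling convention from the kernel-doc above. A minimal sketch of the two call sites in a driver; the device structure and the reprobe helper are assumed:

/* system resume: drm_atomic_helper_resume() follows, so resync first */
static int example_system_resume(struct example_device *edev)
{
	if (drm_dp_mst_topology_mgr_resume(&edev->mst_mgr, true) == -1)
		return example_full_mst_reprobe(edev);	/* undocked */
	return 0;
}

/* runtime resume: no drm_atomic_helper_resume(), so avoid the deadlock */
static int example_runtime_resume(struct example_device *edev)
{
	return drm_dp_mst_topology_mgr_resume(&edev->mst_mgr, false);
}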
@@ -3256,12 +3690,78 @@ clear_down_rep_recv:
return 0;
}
+static inline bool
+drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_pending_up_req *up_req)
+{
+ struct drm_dp_mst_branch *mstb = NULL;
+ struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
+ struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
+ bool hotplug = false;
+
+ if (hdr->broadcast) {
+ const u8 *guid = NULL;
+
+ if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
+ guid = msg->u.conn_stat.guid;
+ else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
+ guid = msg->u.resource_stat.guid;
+
+ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
+ } else {
+ mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
+ }
+
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
+ hdr->lct);
+ return false;
+ }
+
+ /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
+ if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
+ drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
+ hotplug = true;
+ }
+
+ drm_dp_mst_topology_put_mstb(mstb);
+ return hotplug;
+}
+
+static void drm_dp_mst_up_req_work(struct work_struct *work)
+{
+ struct drm_dp_mst_topology_mgr *mgr =
+ container_of(work, struct drm_dp_mst_topology_mgr,
+ up_req_work);
+ struct drm_dp_pending_up_req *up_req;
+ bool send_hotplug = false;
+
+ mutex_lock(&mgr->probe_lock);
+ while (true) {
+ mutex_lock(&mgr->up_req_lock);
+ up_req = list_first_entry_or_null(&mgr->up_req_list,
+ struct drm_dp_pending_up_req,
+ next);
+ if (up_req)
+ list_del(&up_req->next);
+ mutex_unlock(&mgr->up_req_lock);
+
+ if (!up_req)
+ break;
+
+ send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
+ kfree(up_req);
+ }
+ mutex_unlock(&mgr->probe_lock);
+
+ if (send_hotplug)
+ drm_kms_helper_hotplug_event(mgr->dev);
+}
+
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
- struct drm_dp_sideband_msg_req_body msg;
struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
- struct drm_dp_mst_branch *mstb = NULL;
- const u8 *guid;
+ struct drm_dp_pending_up_req *up_req;
bool seqno;
if (!drm_dp_get_one_sb_msg(mgr, true))
@@ -3270,56 +3770,53 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
if (!mgr->up_req_recv.have_eomt)
return 0;
- if (!hdr->broadcast) {
- mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
- hdr->lct);
- goto out;
- }
+ up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
+ if (!up_req) {
+ DRM_ERROR("Not enough memory to process MST up req\n");
+ return -ENOMEM;
}
+ INIT_LIST_HEAD(&up_req->next);
seqno = hdr->seqno;
- drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
+ drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
- if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY)
- guid = msg.u.conn_stat.guid;
- else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY)
- guid = msg.u.resource_stat.guid;
- else
+ if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
+ up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
+ DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
+ up_req->msg.req_type);
+ kfree(up_req);
goto out;
-
- drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno,
- false);
-
- if (!mstb) {
- mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
- hdr->lct);
- goto out;
- }
}
- if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
- drm_dp_mst_handle_conn_stat(mstb, &msg.u.conn_stat);
+ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
+ seqno, false);
+
+ if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
+ const struct drm_dp_connection_status_notify *conn_stat =
+ &up_req->msg.u.conn_stat;
DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
- msg.u.conn_stat.port_number,
- msg.u.conn_stat.legacy_device_plug_status,
- msg.u.conn_stat.displayport_device_plug_status,
- msg.u.conn_stat.message_capability_status,
- msg.u.conn_stat.input_port,
- msg.u.conn_stat.peer_device_type);
+ conn_stat->port_number,
+ conn_stat->legacy_device_plug_status,
+ conn_stat->displayport_device_plug_status,
+ conn_stat->message_capability_status,
+ conn_stat->input_port,
+ conn_stat->peer_device_type);
+ } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
+ const struct drm_dp_resource_status_notify *res_stat =
+ &up_req->msg.u.resource_stat;
- drm_kms_helper_hotplug_event(mgr->dev);
- } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
- msg.u.resource_stat.port_number,
- msg.u.resource_stat.available_pbn);
+ res_stat->port_number,
+ res_stat->available_pbn);
}
- drm_dp_mst_topology_put_mstb(mstb);
+ up_req->hdr = *hdr;
+ mutex_lock(&mgr->up_req_lock);
+ list_add_tail(&up_req->next, &mgr->up_req_list);
+ mutex_unlock(&mgr->up_req_lock);
+ queue_work(system_long_wq, &mgr->up_req_work);
+
out:
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0;
@@ -3366,22 +3863,31 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
/**
* drm_dp_mst_detect_port() - get connection status for an MST port
* @connector: DRM connector for this port
+ * @ctx: The acquisition context to use for grabbing locks
* @mgr: manager for this port
- * @port: unverified pointer to a port
+ * @port: pointer to a port
*
- * This returns the current connection state for a port. It validates the
- * port pointer still exists so the caller doesn't require a reference
+ * This returns the current connection state for a port.
*/
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
- struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+int
+drm_dp_mst_detect_port(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port)
{
- enum drm_connector_status status = connector_status_disconnected;
+ int ret;
/* we need to search for the port in the mgr in case it's gone */
port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return connector_status_disconnected;
+ ret = drm_modeset_lock(&mgr->base.lock, ctx);
+ if (ret)
+ goto out;
+
+ ret = connector_status_disconnected;
+
if (!port->ddps)
goto out;
@@ -3391,7 +3897,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
break;
case DP_PEER_DEVICE_SST_SINK:
- status = connector_status_connected;
+ ret = connector_status_connected;
/* for logical ports - cache the EDID */
if (port->port_num >= 8 && !port->cached_edid) {
port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
@@ -3399,12 +3905,12 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
break;
case DP_PEER_DEVICE_DP_LEGACY_CONV:
if (port->ldps)
- status = connector_status_connected;
+ ret = connector_status_connected;
break;
}
out:
drm_dp_mst_topology_put_port(port);
- return status;
+ return ret;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);
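
Because the function can now block on &mgr->base.lock, callers move from the plain ->detect() hook to the acquire-context-aware ->detect_ctx(). A minimal sketch, with the connector wrapper type assumed:

static int example_detect_ctx(struct drm_connector *connector,
			      struct drm_modeset_acquire_ctx *ctx,
			      bool force)
{
	struct example_mst_connector *c = to_example_mst_connector(connector);

	return drm_dp_mst_detect_port(connector, ctx, c->mgr, c->port);
}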
@@ -3540,7 +4046,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
{
struct drm_dp_mst_topology_state *topology_state;
struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
- int prev_slots, req_slots, ret;
+ int prev_slots, req_slots;
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(topology_state))
@@ -3587,8 +4093,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
}
vcpi->vcpi = req_slots;
- ret = req_slots;
- return ret;
+ return req_slots;
}
EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
@@ -3995,34 +4500,103 @@ static void drm_dp_tx_work(struct work_struct *work)
mutex_unlock(&mgr->qlock);
}
-static void drm_dp_destroy_connector_work(struct work_struct *work)
+static inline void
+drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
{
- struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
- struct drm_dp_mst_port *port;
- bool send_hotplug = false;
+ if (port->connector)
+ port->mgr->cbs->destroy_connector(port->mgr, port->connector);
+
+ drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE);
+ drm_dp_mst_put_port_malloc(port);
+}
+
+static inline void
+drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
+ struct drm_dp_mst_port *port, *tmp;
+ bool wake_tx = false;
+
+ mutex_lock(&mgr->lock);
+ list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
+ list_del(&port->next);
+ drm_dp_mst_topology_put_port(port);
+ }
+ mutex_unlock(&mgr->lock);
+
+ /* drop any tx slots msg */
+ mutex_lock(&mstb->mgr->qlock);
+ if (mstb->tx_slots[0]) {
+ mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ mstb->tx_slots[0] = NULL;
+ wake_tx = true;
+ }
+ if (mstb->tx_slots[1]) {
+ mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ mstb->tx_slots[1] = NULL;
+ wake_tx = true;
+ }
+ mutex_unlock(&mstb->mgr->qlock);
+
+ if (wake_tx)
+ wake_up_all(&mstb->mgr->tx_waitq);
+
+ drm_dp_mst_put_mstb_malloc(mstb);
+}
+
+static void drm_dp_delayed_destroy_work(struct work_struct *work)
+{
+ struct drm_dp_mst_topology_mgr *mgr =
+ container_of(work, struct drm_dp_mst_topology_mgr,
+ delayed_destroy_work);
+ bool send_hotplug = false, go_again;
+
/*
* Not a regular list traverse as we have to drop the destroy
- * connector lock before destroying the connector, to avoid AB->BA
+ * connector lock before destroying the mstb/port, to avoid AB->BA
* ordering between this lock and the config mutex.
*/
- for (;;) {
- mutex_lock(&mgr->destroy_connector_lock);
- port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
- if (!port) {
- mutex_unlock(&mgr->destroy_connector_lock);
- break;
+ do {
+ go_again = false;
+
+ for (;;) {
+ struct drm_dp_mst_branch *mstb;
+
+ mutex_lock(&mgr->delayed_destroy_lock);
+ mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
+ struct drm_dp_mst_branch,
+ destroy_next);
+ if (mstb)
+ list_del(&mstb->destroy_next);
+ mutex_unlock(&mgr->delayed_destroy_lock);
+
+ if (!mstb)
+ break;
+
+ drm_dp_delayed_destroy_mstb(mstb);
+ go_again = true;
}
- list_del(&port->next);
- mutex_unlock(&mgr->destroy_connector_lock);
- mgr->cbs->destroy_connector(mgr, port->connector);
+ for (;;) {
+ struct drm_dp_mst_port *port;
- drm_dp_port_teardown_pdt(port, port->pdt);
- port->pdt = DP_PEER_DEVICE_NONE;
+ mutex_lock(&mgr->delayed_destroy_lock);
+ port = list_first_entry_or_null(&mgr->destroy_port_list,
+ struct drm_dp_mst_port,
+ next);
+ if (port)
+ list_del(&port->next);
+ mutex_unlock(&mgr->delayed_destroy_lock);
+
+ if (!port)
+ break;
+
+ drm_dp_delayed_destroy_port(port);
+ send_hotplug = true;
+ go_again = true;
+ }
+ } while (go_again);
- drm_dp_mst_put_port_malloc(port);
- send_hotplug = true;
- }
if (send_hotplug)
drm_kms_helper_hotplug_event(mgr->dev);
}
@@ -4184,9 +4758,6 @@ EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr)
{
- struct drm_device *dev = mgr->dev;
-
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
}
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
@@ -4212,12 +4783,20 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mutex_init(&mgr->lock);
mutex_init(&mgr->qlock);
mutex_init(&mgr->payload_lock);
- mutex_init(&mgr->destroy_connector_lock);
+ mutex_init(&mgr->delayed_destroy_lock);
+ mutex_init(&mgr->up_req_lock);
+ mutex_init(&mgr->probe_lock);
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ mutex_init(&mgr->topology_ref_history_lock);
+#endif
INIT_LIST_HEAD(&mgr->tx_msg_downq);
- INIT_LIST_HEAD(&mgr->destroy_connector_list);
+ INIT_LIST_HEAD(&mgr->destroy_port_list);
+ INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
+ INIT_LIST_HEAD(&mgr->up_req_list);
INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
- INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
+ INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
+ INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
init_waitqueue_head(&mgr->tx_waitq);
mgr->dev = dev;
mgr->aux = aux;
@@ -4258,7 +4837,7 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
drm_dp_mst_topology_mgr_set_mst(mgr, false);
flush_work(&mgr->work);
- flush_work(&mgr->destroy_connector_work);
+ cancel_work_sync(&mgr->delayed_destroy_work);
mutex_lock(&mgr->payload_lock);
kfree(mgr->payloads);
mgr->payloads = NULL;
@@ -4270,10 +4849,15 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
drm_atomic_private_obj_fini(&mgr->base);
mgr->funcs = NULL;
- mutex_destroy(&mgr->destroy_connector_lock);
+ mutex_destroy(&mgr->delayed_destroy_lock);
mutex_destroy(&mgr->payload_lock);
mutex_destroy(&mgr->qlock);
mutex_destroy(&mgr->lock);
+ mutex_destroy(&mgr->up_req_lock);
+ mutex_destroy(&mgr->probe_lock);
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ mutex_destroy(&mgr->topology_ref_history_lock);
+#endif
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0552175313cb..474ac04d5600 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -159,6 +159,9 @@ static const struct edid_quirk {
/* Medion MD 30217 PG */
{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+ /* Lenovo G50 */
+ { "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
+
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
@@ -2189,7 +2192,8 @@ static int standard_timing_level(struct edid *edid)
return LEVEL_CVT;
if (drm_gtf2_hbreak(edid))
return LEVEL_GTF2;
- return LEVEL_GTF;
+ if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
+ return LEVEL_GTF;
}
return LEVEL_DMT;
}
@@ -3205,18 +3209,10 @@ static bool drm_valid_cea_vic(u8 vic)
return vic > 0 && vic < ARRAY_SIZE(edid_cea_modes);
}
-/**
- * drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to
- * the input VIC from the CEA mode list
- * @video_code: ID given to each of the CEA modes
- *
- * Returns picture aspect ratio
- */
-enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
+static enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
{
return edid_cea_modes[video_code].picture_aspect_ratio;
}
-EXPORT_SYMBOL(drm_get_cea_aspect_ratio);
/*
* Calculate the alternate clock for HDMI modes (those from the HDMI vendor
@@ -5168,6 +5164,49 @@ drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
}
EXPORT_SYMBOL(drm_hdmi_infoframe_set_hdr_metadata);
+static u8 drm_mode_hdmi_vic(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ bool has_hdmi_infoframe = connector ?
+ connector->display_info.has_hdmi_infoframe : false;
+
+ if (!has_hdmi_infoframe)
+ return 0;
+
+ /* No HDMI VIC when signalling 3D video format */
+ if (mode->flags & DRM_MODE_FLAG_3D_MASK)
+ return 0;
+
+ return drm_match_hdmi_mode(mode);
+}
+
+static u8 drm_mode_cea_vic(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ u8 vic;
+
+ /*
+ * HDMI spec says if a mode is found in HDMI 1.4b 4K modes
+ * we should send its VIC in vendor infoframes, else send the
+ * VIC in AVI infoframes. Let's check if this mode is present in
+ * HDMI 1.4b 4K modes
+ */
+ if (drm_mode_hdmi_vic(connector, mode))
+ return 0;
+
+ vic = drm_match_cea_mode(mode);
+
+ /*
+ * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but
+ * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we
+ * have to make sure we don't break HDMI 1.4 sinks.
+ */
+ if (!is_hdmi2_sink(connector) && vic > 64)
+ return 0;
+
+ return vic;
+}
+
/**
* drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
* data from a DRM display mode
@@ -5195,29 +5234,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
frame->pixel_repeat = 1;
- frame->video_code = drm_match_cea_mode(mode);
-
- /*
- * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but
- * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we
- * have to make sure we dont break HDMI 1.4 sinks.
- */
- if (!is_hdmi2_sink(connector) && frame->video_code > 64)
- frame->video_code = 0;
-
- /*
- * HDMI spec says if a mode is found in HDMI 1.4b 4K modes
- * we should send its VIC in vendor infoframes, else send the
- * VIC in AVI infoframes. Lets check if this mode is present in
- * HDMI 1.4b 4K modes
- */
- if (frame->video_code) {
- u8 vendor_if_vic = drm_match_hdmi_mode(mode);
- bool is_s3d = mode->flags & DRM_MODE_FLAG_3D_MASK;
-
- if (drm_valid_hdmi_vic(vendor_if_vic) && !is_s3d)
- frame->video_code = 0;
- }
+ frame->video_code = drm_mode_cea_vic(connector, mode);
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
@@ -5382,6 +5399,23 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_quant_range);
+/**
+ * drm_hdmi_avi_infoframe_bars() - fill the HDMI AVI infoframe
+ * bar information
+ * @frame: HDMI AVI infoframe
+ * @conn_state: connector state
+ */
+void
+drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state)
+{
+ frame->right_bar = conn_state->tv.margins.right;
+ frame->left_bar = conn_state->tv.margins.left;
+ frame->top_bar = conn_state->tv.margins.top;
+ frame->bottom_bar = conn_state->tv.margins.bottom;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_bars);
+
static enum hdmi_3d_structure
s3d_structure_from_display_mode(const struct drm_display_mode *mode)
{
@@ -5434,8 +5468,6 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
bool has_hdmi_infoframe = connector ?
connector->display_info.has_hdmi_infoframe : false;
int err;
- u32 s3d_flags;
- u8 vic;
if (!frame || !mode)
return -EINVAL;
@@ -5443,8 +5475,9 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
if (!has_hdmi_infoframe)
return -EINVAL;
- vic = drm_match_hdmi_mode(mode);
- s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;
+ err = hdmi_vendor_infoframe_init(frame);
+ if (err < 0)
+ return err;
/*
* Even if it's not absolutely necessary to send the infoframe
@@ -5455,15 +5488,7 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
* mode if the source simply stops sending the infoframe when
* it wants to switch from 3D to 2D.
*/
-
- if (vic && s3d_flags)
- return -EINVAL;
-
- err = hdmi_vendor_infoframe_init(frame);
- if (err < 0)
- return err;
-
- frame->vic = vic;
+ frame->vic = drm_mode_hdmi_vic(connector, mode);
frame->s3d_struct = s3d_structure_from_display_mode(mode);
return 0;
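
For reference, a minimal sketch of how a driver's encoder code would consume the helpers above after this refactor; the wrapper function below is hypothetical, but drm_hdmi_avi_infoframe_from_display_mode() and the new drm_hdmi_avi_infoframe_bars() are the functions defined in this hunk:

#include <drm/drm_edid.h>

static void example_fill_avi_infoframe(struct drm_connector *connector,
				       const struct drm_connector_state *conn_state,
				       const struct drm_display_mode *mode)
{
	struct hdmi_avi_infoframe frame;

	/* Picks the CEA VIC via drm_mode_cea_vic(), HDMI 2.0 aware. */
	if (drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode) < 0)
		return;

	/* New helper: copy the TV margin properties into the bar fields. */
	drm_hdmi_avi_infoframe_bars(&frame, conn_state);

	/* ... pack the frame and write it to the encoder hardware ... */
}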
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index b75ae8555baf..8ebeccdeed23 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -92,9 +92,12 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
*
* Drivers that support a dumb buffer with a virtual address and mmap support,
* should try out the generic fbdev emulation using drm_fbdev_generic_setup().
+ * It will automatically set up deferred I/O if the driver requires a shadow
+ * buffer.
*
- * Setup fbdev emulation by calling drm_fb_helper_fbdev_setup() and tear it
- * down by calling drm_fb_helper_fbdev_teardown().
+ * For other drivers, setup fbdev emulation by calling
+ * drm_fb_helper_fbdev_setup() and tear it down by calling
+ * drm_fb_helper_fbdev_teardown().
*
* At runtime drivers should restore the fbdev console by using
* drm_fb_helper_lastclose() as their &drm_driver.lastclose callback.
@@ -127,8 +130,10 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* always run in process context since the fb_*() function could be running in
* atomic context. If drm_fb_helper_deferred_io() is used as the deferred_io
* callback it will also schedule dirty_work with the damage collected from the
- * mmap page writes. Drivers can use drm_fb_helper_defio_init() to setup
- * deferred I/O (coupled with drm_fb_helper_fbdev_teardown()).
+ * mmap page writes.
+ *
+ * Deferred I/O is not compatible with SHMEM. Such drivers should request an
+ * fbdev shadow buffer and call drm_fbdev_generic_setup() instead.
*/
static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
@@ -680,49 +685,6 @@ void drm_fb_helper_deferred_io(struct fb_info *info,
EXPORT_SYMBOL(drm_fb_helper_deferred_io);
/**
- * drm_fb_helper_defio_init - fbdev deferred I/O initialization
- * @fb_helper: driver-allocated fbdev helper
- *
- * This function allocates &fb_deferred_io, sets callback to
- * drm_fb_helper_deferred_io(), delay to 50ms and calls fb_deferred_io_init().
- * It should be called from the &drm_fb_helper_funcs->fb_probe callback.
- * drm_fb_helper_fbdev_teardown() cleans up deferred I/O.
- *
- * NOTE: A copy of &fb_ops is made and assigned to &info->fbops. This is done
- * because fb_deferred_io_cleanup() clears &fbops->fb_mmap and would thereby
- * affect other instances of that &fb_ops.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper)
-{
- struct fb_info *info = fb_helper->fbdev;
- struct fb_deferred_io *fbdefio;
- struct fb_ops *fbops;
-
- fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
- fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
- if (!fbdefio || !fbops) {
- kfree(fbdefio);
- kfree(fbops);
- return -ENOMEM;
- }
-
- info->fbdefio = fbdefio;
- fbdefio->delay = msecs_to_jiffies(50);
- fbdefio->deferred_io = drm_fb_helper_deferred_io;
-
- *fbops = *info->fbops;
- info->fbops = fbops;
-
- fb_deferred_io_init(info);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_defio_init);
-
-/**
* drm_fb_helper_sys_read - wrapper around fb_sys_read
* @info: fb_info struct pointer
* @buf: userspace buffer to read from framebuffer memory
@@ -2356,7 +2318,10 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
*
* Drivers that set the dirty callback on their framebuffer will get a shadow
* fbdev buffer that is blitted onto the real buffer. This is done in order to
- * make deferred I/O work with all kinds of buffers.
+ * make deferred I/O work with all kinds of buffers. A shadow buffer can be
+ * requested explicitly by setting struct drm_mode_config.prefer_shadow or
+ * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is
+ * required to use generic fbdev emulation with SHMEM helpers.
*
* This function is safe to call even when there are no connectors present.
* Setup will be retried on the next hotplug event.
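
As a concrete illustration of the documentation above, a minimal sketch (assumed driver init code, not part of this patch) of an SHMEM-backed driver opting in to the shadow-buffered generic emulation:

static void example_setup_fbdev(struct drm_device *dev)
{
	/* Request a shadow fbdev buffer; required with SHMEM helpers. */
	dev->mode_config.prefer_shadow_fbdev = true;

	/* Deferred I/O is then set up automatically if needed. */
	drm_fbdev_generic_setup(dev, 32);
}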
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 6854f5867d51..2f2b889096b0 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1099,22 +1099,34 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
struct vm_area_struct *vma)
{
struct drm_device *dev = obj->dev;
+ int ret;
/* Check for valid size. */
if (obj_size < vma->vm_end - vma->vm_start)
return -EINVAL;
- if (obj->funcs && obj->funcs->vm_ops)
- vma->vm_ops = obj->funcs->vm_ops;
- else if (dev->driver->gem_vm_ops)
- vma->vm_ops = dev->driver->gem_vm_ops;
- else
- return -EINVAL;
+ if (obj->funcs && obj->funcs->mmap) {
+ /* Remove the fake offset */
+ vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
+
+ ret = obj->funcs->mmap(obj, vma);
+ if (ret)
+ return ret;
+ WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
+ } else {
+ if (obj->funcs && obj->funcs->vm_ops)
+ vma->vm_ops = obj->funcs->vm_ops;
+ else if (dev->driver->gem_vm_ops)
+ vma->vm_ops = dev->driver->gem_vm_ops;
+ else
+ return -EINVAL;
+
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ }
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = obj;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
/* Take a ref for this mapping of the object, so that the fault
* handler can dereference the mmap offset's pointer to the object.
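
A sketch of the contract this introduces for drivers (the callback and vm_ops names are hypothetical): with &drm_gem_object_funcs.mmap set, the core strips the fake offset before the call, and the callback must leave VM_DONTEXPAND set, as the WARN_ON above enforces:

static int example_gem_mmap(struct drm_gem_object *obj,
			    struct vm_area_struct *vma)
{
	/* vma->vm_pgoff no longer carries the fake mmap offset here. */
	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_ops = &example_gem_vm_ops;	/* hypothetical vm_ops */

	return 0;
}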
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index f5918707672f..0810d3ef6961 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -32,7 +32,7 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .vm_ops = &drm_gem_shmem_vm_ops,
+ .mmap = drm_gem_shmem_mmap,
};
/**
@@ -505,39 +505,30 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
drm_gem_vm_close(vma);
}
-const struct vm_operations_struct drm_gem_shmem_vm_ops = {
+static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
.fault = drm_gem_shmem_fault,
.open = drm_gem_shmem_vm_open,
.close = drm_gem_shmem_vm_close,
};
-EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
/**
* drm_gem_shmem_mmap - Memory-map a shmem GEM object
- * @filp: File object
+ * @obj: gem object
* @vma: VMA for the area to be mapped
*
* This function implements an augmented version of the GEM DRM file mmap
* operation for shmem objects. Drivers which employ the shmem helpers should
- * use this function as their &file_operations.mmap handler in the DRM device file's
- * file_operations structure.
- *
- * Instead of directly referencing this function, drivers should use the
- * DEFINE_DRM_GEM_SHMEM_FOPS() macro.
+ * use this function as their &drm_gem_object_funcs.mmap handler.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
+int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct drm_gem_shmem_object *shmem;
int ret;
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- shmem = to_drm_gem_shmem_obj(vma->vm_private_data);
+ shmem = to_drm_gem_shmem_obj(obj);
ret = drm_gem_shmem_get_pages(shmem);
if (ret) {
@@ -545,12 +536,10 @@ int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
- /* VM_PFNMAP was set by drm_gem_mmap() */
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
-
- /* Remove the fake offset */
- vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);
+ vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ vma->vm_ops = &drm_gem_shmem_vm_ops;
return 0;
}
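
Put together, a shmem-based driver now wires mmap through the object functions rather than through &file_operations; a sketch under that assumption (the funcs table mirrors drm_gem_shmem_funcs above, the names are placeholders):

static const struct drm_gem_object_funcs example_shmem_funcs = {
	.free		= drm_gem_shmem_free_object,
	.print_info	= drm_gem_shmem_print_info,
	.get_sg_table	= drm_gem_shmem_get_sg_table,
	.vmap		= drm_gem_shmem_vmap,
	.vunmap		= drm_gem_shmem_vunmap,
	.mmap		= drm_gem_shmem_mmap,	/* new-style hook */
};

/* The file_operations side only needs the generic drm_gem_mmap(). */
DEFINE_DRM_GEM_FOPS(example_fops);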
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index a534104d8bee..7412bfc5c05a 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -52,5 +52,22 @@ void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
}
EXPORT_SYMBOL(drm_gem_ttm_print_info);
+/**
+ * drm_gem_ttm_mmap() - mmap &ttm_buffer_object
+ * @gem: GEM object.
+ * @vma: vm area.
+ *
+ * This function can be used as &drm_gem_object_funcs.mmap
+ * callback.
+ */
+int drm_gem_ttm_mmap(struct drm_gem_object *gem,
+ struct vm_area_struct *vma)
+{
+ struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+
+ return ttm_bo_mmap_obj(vma, bo);
+}
+EXPORT_SYMBOL(drm_gem_ttm_mmap);
+
MODULE_DESCRIPTION("DRM gem ttm helpers");
MODULE_LICENSE("GPL");
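
The vram helper below adopts this new callback immediately; in general, a TTM-based driver would plug it in like so (sketch):

static const struct drm_gem_object_funcs example_ttm_gem_funcs = {
	/* ... other callbacks ... */
	.mmap = drm_gem_ttm_mmap,	/* forwards to ttm_bo_mmap_obj() */
};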
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index dc7942981f4a..666cb4c22bb9 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -3,10 +3,13 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_mode.h>
+#include <drm/drm_plane.h>
#include <drm/drm_prime.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/ttm/ttm_page_alloc.h>
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
@@ -552,13 +555,6 @@ static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo,
*pl = gbo->placement;
}
-static int drm_gem_vram_bo_driver_verify_access(struct drm_gem_vram_object *gbo,
- struct file *filp)
-{
- return drm_vma_node_verify_access(&gbo->bo.base.vma_node,
- filp->private_data);
-}
-
static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
bool evict,
struct ttm_mem_reg *new_mem)
@@ -654,6 +650,129 @@ int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
/*
+ * Helpers for struct drm_plane_helper_funcs
+ */
+
+/**
+ * drm_gem_vram_plane_helper_prepare_fb() - \
+ * Implements &struct drm_plane_helper_funcs.prepare_fb
+ * @plane: a DRM plane
+ * @new_state: the plane's new state
+ *
+ * During plane updates, this function pins the GEM VRAM
+ * objects of the plane's new framebuffer to VRAM. Call
+ * drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative errno code otherwise.
+ */
+int
+drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+ int ret;
+
+ if (!new_state->fb)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(new_state->fb->obj); ++i) {
+ if (!new_state->fb->obj[i])
+ continue;
+ gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
+ if (ret)
+ goto err_drm_gem_vram_unpin;
+ }
+
+ return 0;
+
+err_drm_gem_vram_unpin:
+ while (i) {
+ --i;
+ gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
+ drm_gem_vram_unpin(gbo);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb);
+
+/**
+ * drm_gem_vram_plane_helper_cleanup_fb() - \
+ * Implements &struct drm_plane_helper_funcs.cleanup_fb
+ * @plane: a DRM plane
+ * @old_state: the plane's old state
+ *
+ * During plane updates, this function unpins the GEM VRAM
+ * objects of the plane's old framebuffer from VRAM. Complements
+ * drm_gem_vram_plane_helper_prepare_fb().
+ */
+void
+drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+
+ if (!old_state->fb)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(old_state->fb->obj); ++i) {
+ if (!old_state->fb->obj[i])
+ continue;
+ gbo = drm_gem_vram_of_gem(old_state->fb->obj[i]);
+ drm_gem_vram_unpin(gbo);
+ }
+}
+EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
+
+/*
+ * Helpers for struct drm_simple_display_pipe_funcs
+ */
+
+/**
+ * drm_gem_vram_simple_display_pipe_prepare_fb() - \
+ * Implements &struct drm_simple_display_pipe_funcs.prepare_fb
+ * @pipe: a simple display pipe
+ * @new_state: the plane's new state
+ *
+ * During plane updates, this function pins the GEM VRAM
+ * objects of the plane's new framebuffer to VRAM. Call
+ * drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them.
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative errno code otherwise.
+ */
+int drm_gem_vram_simple_display_pipe_prepare_fb(
+ struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *new_state)
+{
+ return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state);
+}
+EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb);
+
+/**
+ * drm_gem_vram_simple_display_pipe_cleanup_fb() - \
+ * Implements &struct drm_simple_display_pipe_funcs.cleanup_fb
+ * @pipe: a simple display pipe
+ * @old_state: the plane's old state
+ *
+ * During plane updates, this function unpins the GEM VRAM
+ * objects of the plane's old framebuffer from VRAM. Complements
+ * drm_gem_vram_simple_display_pipe_prepare_fb().
+ */
+void drm_gem_vram_simple_display_pipe_cleanup_fb(
+ struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
+{
+ drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state);
+}
+EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb);
+
+/*
* PRIME helpers
*/
@@ -737,6 +856,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
.unpin = drm_gem_vram_object_unpin,
.vmap = drm_gem_vram_object_vmap,
.vunmap = drm_gem_vram_object_vunmap,
+ .mmap = drm_gem_ttm_mmap,
.print_info = drm_gem_ttm_print_info,
};
@@ -822,20 +942,6 @@ static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
drm_gem_vram_bo_driver_evict_flags(gbo, placement);
}
-static int bo_driver_verify_access(struct ttm_buffer_object *bo,
- struct file *filp)
-{
- struct drm_gem_vram_object *gbo;
-
- /* TTM may pass BOs that are not GEM VRAM BOs. */
- if (!drm_is_gem_vram(bo))
- return -EINVAL;
-
- gbo = drm_gem_vram_of_bo(bo);
-
- return drm_gem_vram_bo_driver_verify_access(gbo, filp);
-}
-
static void bo_driver_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_mem_reg *new_mem)
@@ -892,7 +998,6 @@ static struct ttm_bo_driver bo_driver = {
.init_mem_type = bo_driver_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = bo_driver_evict_flags,
- .verify_access = bo_driver_verify_access,
.move_notify = bo_driver_move_notify,
.io_mem_reserve = bo_driver_io_mem_reserve,
.io_mem_free = bo_driver_io_mem_free,
@@ -908,12 +1013,11 @@ static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
struct drm_mm *mm = vmm->bdev.man[TTM_PL_VRAM].priv;
- struct ttm_bo_global *glob = vmm->bdev.glob;
struct drm_printer p = drm_seq_file_printer(m);
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
drm_mm_print(mm, &p);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}
@@ -971,12 +1075,6 @@ static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
ttm_bo_device_release(&vmm->bdev);
}
-static int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma,
- struct drm_vram_mm *vmm)
-{
- return ttm_bo_mmap(filp, vma, &vmm->bdev);
-}
-
/*
* Helpers for integration with struct drm_device
*/
@@ -1032,30 +1130,3 @@ void drm_vram_helper_release_mm(struct drm_device *dev)
dev->vram_mm = NULL;
}
EXPORT_SYMBOL(drm_vram_helper_release_mm);
-
-/*
- * Helpers for &struct file_operations
- */
-
-/**
- * drm_vram_mm_file_operations_mmap() - \
- Implements &struct file_operations.mmap()
- * @filp: the mapping's file structure
- * @vma: the mapping's memory area
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_vram_mm_file_operations_mmap(
- struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev = file_priv->minor->dev;
-
- if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
- return -EINVAL;
-
- return drm_vram_mm_mmap(filp, vma, dev->vram_mm);
-}
-EXPORT_SYMBOL(drm_vram_mm_file_operations_mmap);
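
A sketch of how a driver consumes the new plane helpers (hibmc is converted to exactly this pattern later in this diff); the atomic_check/atomic_update names stand in for the driver's own callbacks:

static const struct drm_plane_helper_funcs example_plane_helper_funcs = {
	.prepare_fb	= drm_gem_vram_plane_helper_prepare_fb,	/* pins to VRAM */
	.cleanup_fb	= drm_gem_vram_plane_helper_cleanup_fb,	/* unpins */
	.atomic_check	= example_plane_atomic_check,
	.atomic_update	= example_plane_atomic_update,
};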
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index ccfb5b33c5e3..e34058c721be 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -1021,7 +1021,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *dbi, u8 *cmd,
unsigned int i;
for (i = 0; i < len; i++)
- data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7));
+ data[i] = (buf[i] << 1) | (buf[i + 1] >> 7);
}
MIPI_DBI_DEBUG_COMMAND(*cmd, data, len);
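
To make the bit manipulation above concrete, a worked example (assuming, as the surrounding code does, that the controller returns data delayed by one dummy clock so each 8-bit value straddles two received bytes):

/*
 *   buf[i]     = 0b0abcdefg   (bit 7 of the value sits in the next byte)
 *   buf[i + 1] = 0bh???????
 *
 *   data[i] = (buf[i] << 1) | (buf[i + 1] >> 7)
 *           = 0babcdefg0 | 0b0000000h
 *           = 0babcdefgh
 */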
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 7bc03c3c154f..3b570a404933 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -428,8 +428,6 @@ EXPORT_SYMBOL(drm_mode_config_init);
* Note that since this /should/ happen single-threaded at driver/device
* teardown time, no locking is required. It's the driver's job to ensure that
* this guarantee actually holds true.
- *
- * FIXME: cleanup any dangling user buffer objects too
*/
void drm_mode_config_cleanup(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 0a2316e0e812..0814211b0f3f 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -713,6 +713,15 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
struct file *fil;
int ret;
+ if (obj->funcs && obj->funcs->mmap) {
+ ret = obj->funcs->mmap(obj, vma);
+ if (ret)
+ return ret;
+ vma->vm_private_data = obj;
+ drm_gem_object_get(obj);
+ return 0;
+ }
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
fil = kzalloc(sizeof(*fil), GFP_KERNEL);
if (!priv || !fil) {
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 046055719245..15fb516ae2d8 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -43,7 +43,7 @@ drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
/* Anything goes */
return MODE_OK;
- return pipe->funcs->mode_valid(crtc, mode);
+ return pipe->funcs->mode_valid(pipe, mode);
}
static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
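
The fix above matters because the pipe-level callback takes the pipe, not the CRTC; an example implementation under that signature (the size limit is illustrative):

static enum drm_mode_status
example_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
			const struct drm_display_mode *mode)
{
	/* e.g. reject modes wider than the scanout hardware supports */
	if (mode->hdisplay > 4096)
		return MODE_BAD_HVALUE;

	return MODE_OK;
}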
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 9ec334663c2d..669c93fe2500 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -1280,7 +1280,7 @@ drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
return -EOPNOTSUPP;
- if (args->pad != 0)
+ if (args->flags != 0)
return -EINVAL;
if (args->count_handles == 0)
@@ -1351,7 +1351,7 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
return -EOPNOTSUPP;
- if (args->pad != 0)
+ if (args->flags & ~DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED)
return -EINVAL;
if (args->count_handles == 0)
@@ -1372,25 +1372,32 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
fence = drm_syncobj_fence_get(syncobjs[i]);
chain = to_dma_fence_chain(fence);
if (chain) {
- struct dma_fence *iter, *last_signaled = NULL;
-
- dma_fence_chain_for_each(iter, fence) {
- if (iter->context != fence->context) {
- dma_fence_put(iter);
- /* It is most likely that timeline has
- * unorder points. */
- break;
+ struct dma_fence *iter, *last_signaled =
+ dma_fence_get(fence);
+
+ if (args->flags &
+ DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) {
+ point = fence->seqno;
+ } else {
+ dma_fence_chain_for_each(iter, fence) {
+ if (iter->context != fence->context) {
+ dma_fence_put(iter);
+ /* It is most likely that the timeline has
+ * unordered points. */
+ break;
+ }
+ dma_fence_put(last_signaled);
+ last_signaled = dma_fence_get(iter);
}
- dma_fence_put(last_signaled);
- last_signaled = dma_fence_get(iter);
+ point = dma_fence_is_signaled(last_signaled) ?
+ last_signaled->seqno :
+ to_dma_fence_chain(last_signaled)->prev_seqno;
}
- point = dma_fence_is_signaled(last_signaled) ?
- last_signaled->seqno :
- to_dma_fence_chain(last_signaled)->prev_seqno;
dma_fence_put(last_signaled);
} else {
point = 0;
}
+ dma_fence_put(fence);
ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
ret = ret ? -EFAULT : 0;
if (ret)
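
From userspace, the new flag selects the last submitted rather than the last signaled timeline point; a hedged sketch using the libdrm ioctl wrapper (handle and fd setup omitted):

uint64_t point = 0;
struct drm_syncobj_timeline_array args = {
	.handles = (uintptr_t)&handle,
	.points = (uintptr_t)&point,
	.count_handles = 1,
	.flags = DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED,
};
int ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_QUERY, &args);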
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 4f7962b6427b..1659b13b178c 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -1610,7 +1610,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
unsigned int flags, pipe, high_pipe;
if (!dev->irq_enabled)
- return -EINVAL;
+ return -EOPNOTSUPP;
if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
return -EINVAL;
@@ -1876,7 +1876,7 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
return -EOPNOTSUPP;
if (!dev->irq_enabled)
- return -EINVAL;
+ return -EOPNOTSUPP;
crtc = drm_crtc_find(dev, file_priv, get_seq->crtc_id);
if (!crtc)
@@ -1934,7 +1934,7 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
return -EOPNOTSUPP;
if (!dev->irq_enabled)
- return -EINVAL;
+ return -EOPNOTSUPP;
crtc = drm_crtc_find(dev, file_priv, queue_seq->crtc_id);
if (!crtc)
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 7b24338fad3c..6cfdb95fef2f 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1069,9 +1069,9 @@ static bool mixer_mode_fixup(struct exynos_drm_crtc *crtc,
struct mixer_context *ctx = crtc->ctx;
int width = mode->hdisplay, height = mode->vdisplay, i;
- struct {
+ static const struct {
int hdisplay, vdisplay, htotal, vtotal, scan_val;
- } static const modes[] = {
+ } modes[] = {
{ 720, 480, 858, 525, MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD },
{ 720, 576, 864, 625, MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD },
{ 1280, 720, 1650, 750, MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD },
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index f56852a503e8..8b784947ed3b 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -405,6 +405,8 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct gma_clock_t clock;
+ memset(&clock, 0, sizeof(clock));
+
switch (refclk) {
case 27000:
if (target < 200000) {
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 03023fa0fb6f..f350ac1ead18 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -498,7 +498,7 @@ void mdfld_dsi_output_init(struct drm_device *dev,
return;
}
- /*create a new connetor*/
+ /*create a new connector*/
dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL);
if (!dsi_connector) {
DRM_ERROR("No memory");
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 167c10767dd4..900e5499249d 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -129,6 +129,7 @@ static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
s32 freq_error, min_error = 100000;
memset(best_clock, 0, sizeof(*best_clock));
+ memset(&clock, 0, sizeof(clock));
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
for (clock.n = limit->n.min; clock.n <= limit->n.max;
@@ -185,6 +186,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
int err = target;
memset(best_clock, 0, sizeof(*best_clock));
+ memset(&clock, 0, sizeof(clock));
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index cc4c41748cfb..6527a97f68a3 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -96,7 +96,6 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
{
struct drm_plane_state *state = plane->state;
u32 reg;
- int ret;
s64 gpu_addr = 0;
unsigned int line_l;
struct hibmc_drm_private *priv = plane->dev->dev_private;
@@ -109,16 +108,9 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
hibmc_fb = to_hibmc_framebuffer(state->fb);
gbo = drm_gem_vram_of_gem(hibmc_fb->obj);
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret) {
- DRM_ERROR("failed to pin bo: %d", ret);
- return;
- }
gpu_addr = drm_gem_vram_offset(gbo);
- if (gpu_addr < 0) {
- drm_gem_vram_unpin(gbo);
- return;
- }
+ if (WARN_ON_ONCE(gpu_addr < 0))
+ return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */
writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS);
@@ -157,6 +149,8 @@ static struct drm_plane_funcs hibmc_plane_funcs = {
};
static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = {
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
.atomic_check = hibmc_plane_atomic_check,
.atomic_update = hibmc_plane_atomic_update,
};
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 4f52c83b9b4c..2fd4ca91a62d 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -26,10 +26,7 @@
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
-static const struct file_operations hibmc_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(hibmc_fops);
static irqreturn_t hibmc_drm_interrupt(int irq, void *arg)
{
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 6c218bace2ce..a63790d32d75 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -806,8 +806,8 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
tda998x_edid_delay_start(priv);
} else {
schedule_work(&priv->detect_work);
- cec_notifier_set_phys_addr(priv->cec_notify,
- CEC_PHYS_ADDR_INVALID);
+ cec_notifier_phys_addr_invalidate(
+ priv->cec_notify);
}
handled = true;
@@ -1791,8 +1791,7 @@ static void tda998x_destroy(struct device *dev)
i2c_unregister_device(priv->cec);
- if (priv->cec_notify)
- cec_notifier_put(priv->cec_notify);
+ cec_notifier_conn_unregister(priv->cec_notify);
}
static int tda998x_create(struct device *dev)
@@ -1917,7 +1916,7 @@ static int tda998x_create(struct device *dev)
cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD);
}
- priv->cec_notify = cec_notifier_get(dev);
+ priv->cec_notify = cec_notifier_conn_register(dev, NULL, NULL);
if (!priv->cec_notify) {
ret = -ENOMEM;
goto fail;
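
For context, the converted CEC notifier lifecycle in sketch form (struct example_priv and the function names are hypothetical; the NULL connector name/info arguments match what tda998x passes):

static int example_cec_init(struct device *dev, struct example_priv *priv)
{
	priv->cec_notify = cec_notifier_conn_register(dev, NULL, NULL);
	if (!priv->cec_notify)
		return -ENOMEM;
	return 0;
}

static void example_cec_hpd_lost(struct example_priv *priv)
{
	/* Replaces cec_notifier_set_phys_addr(n, CEC_PHYS_ADDR_INVALID). */
	cec_notifier_phys_addr_invalidate(priv->cec_notify);
}

static void example_cec_fini(struct example_priv *priv)
{
	/* No NULL check needed; the unregister helper handles it. */
	cec_notifier_conn_unregister(priv->cec_notify);
}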
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index d958e789ab96..c61ac0c3acb5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -7640,7 +7640,8 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
if (!intel_dp->can_mst)
continue;
- ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
+ ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
+ true);
if (ret) {
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 715b7109c388..9ae5b8b6bbbc 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -391,20 +391,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
return ret;
}
-static enum drm_connector_status
-intel_dp_mst_detect(struct drm_connector *connector, bool force)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_dp *intel_dp = intel_connector->mst_port;
-
- if (drm_connector_is_unregistered(connector))
- return connector_status_disconnected;
- return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr,
- intel_connector->port);
-}
-
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
- .detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
@@ -465,11 +452,26 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
return &intel_dp->mst_encoders[crtc->pipe]->base.base;
}
+static int
+intel_dp_mst_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+
+ if (drm_connector_is_unregistered(connector))
+ return connector_status_disconnected;
+
+ return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr,
+ intel_connector->port);
+}
+
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
.get_modes = intel_dp_mst_get_modes,
.mode_valid = intel_dp_mst_mode_valid,
.atomic_best_encoder = intel_mst_atomic_best_encoder,
.atomic_check = intel_dp_mst_atomic_check,
+ .detect_ctx = intel_dp_mst_detect,
};
static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/lima/Kconfig b/drivers/gpu/drm/lima/Kconfig
index bb4ddc6bb0a6..571dc369a7e9 100644
--- a/drivers/gpu/drm/lima/Kconfig
+++ b/drivers/gpu/drm/lima/Kconfig
@@ -9,5 +9,6 @@ config DRM_LIMA
depends on COMMON_CLK
depends on OF
select DRM_SCHED
+ select DRM_GEM_SHMEM_HELPER
help
DRM driver for ARM Mali 400/450 GPUs.
diff --git a/drivers/gpu/drm/lima/Makefile b/drivers/gpu/drm/lima/Makefile
index 38cc70281ba5..a85444b0a1d4 100644
--- a/drivers/gpu/drm/lima/Makefile
+++ b/drivers/gpu/drm/lima/Makefile
@@ -13,9 +13,7 @@ lima-y := \
lima_vm.o \
lima_sched.o \
lima_ctx.o \
- lima_gem_prime.o \
lima_dlbu.o \
- lima_bcast.o \
- lima_object.o
+ lima_bcast.o
obj-$(CONFIG_DRM_LIMA) += lima.o
diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c
index e3e0ca11382e..19829b543024 100644
--- a/drivers/gpu/drm/lima/lima_device.c
+++ b/drivers/gpu/drm/lima/lima_device.c
@@ -314,7 +314,7 @@ int lima_device_init(struct lima_device *ldev)
ldev->va_end = LIMA_VA_RESERVE_START;
ldev->dlbu_cpu = dma_alloc_wc(
ldev->dev, LIMA_PAGE_SIZE,
- &ldev->dlbu_dma, GFP_KERNEL);
+ &ldev->dlbu_dma, GFP_KERNEL | __GFP_NOWARN);
if (!ldev->dlbu_cpu) {
err = -ENOMEM;
goto err_out2;
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 75ec703d22e0..124efe4fa97b 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -12,7 +12,6 @@
#include "lima_drv.h"
#include "lima_gem.h"
-#include "lima_gem_prime.h"
#include "lima_vm.h"
int lima_sched_timeout_ms;
@@ -240,16 +239,7 @@ static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
DRM_IOCTL_DEF_DRV(LIMA_CTX_FREE, lima_ioctl_ctx_free, DRM_RENDER_ALLOW),
};
-static const struct file_operations lima_drm_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = drm_compat_ioctl,
-#endif
- .mmap = lima_gem_mmap,
-};
+DEFINE_DRM_GEM_FOPS(lima_drm_driver_fops);
static struct drm_driver lima_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
@@ -258,10 +248,6 @@ static struct drm_driver lima_drm_driver = {
.ioctls = lima_drm_driver_ioctls,
.num_ioctls = ARRAY_SIZE(lima_drm_driver_ioctls),
.fops = &lima_drm_driver_fops,
- .gem_free_object_unlocked = lima_gem_free_object,
- .gem_open_object = lima_gem_object_open,
- .gem_close_object = lima_gem_object_close,
- .gem_vm_ops = &lima_gem_vm_ops,
.name = "lima",
.desc = "lima DRM",
.date = "20190217",
@@ -269,11 +255,11 @@ static struct drm_driver lima_drm_driver = {
.minor = 0,
.patchlevel = 0,
+ .gem_create_object = lima_gem_create_object,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import_sg_table = lima_gem_prime_import_sg_table,
+ .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .gem_prime_get_sg_table = lima_gem_prime_get_sg_table,
- .gem_prime_mmap = lima_gem_prime_mmap,
+ .gem_prime_mmap = drm_gem_prime_mmap,
};
static int lima_pdev_probe(struct platform_device *pdev)
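
For reference, DEFINE_DRM_GEM_FOPS() from <drm/drm_gem.h> expands to roughly the boilerplate being deleted here, with drm_gem_mmap() as the mmap handler:

static const struct file_operations lima_drm_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
	.mmap		= drm_gem_mmap,
};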
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 4da21353c3a2..d0059d8c97d8 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -3,7 +3,7 @@
#include <linux/mm.h>
#include <linux/sync_file.h>
-#include <linux/pfn_t.h>
+#include <linux/pagemap.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
@@ -13,40 +13,55 @@
#include "lima_drv.h"
#include "lima_gem.h"
-#include "lima_gem_prime.h"
#include "lima_vm.h"
-#include "lima_object.h"
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle)
{
int err;
- struct lima_bo *bo;
- struct lima_device *ldev = to_lima_dev(dev);
+ gfp_t mask;
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
+ struct sg_table *sgt;
+
+ shmem = drm_gem_shmem_create(dev, size);
+ if (IS_ERR(shmem))
+ return PTR_ERR(shmem);
+
+ obj = &shmem->base;
- bo = lima_bo_create(ldev, size, flags, NULL);
- if (IS_ERR(bo))
- return PTR_ERR(bo);
+ /* Mali Utgard GPU can only support 32bit address space */
+ mask = mapping_gfp_mask(obj->filp->f_mapping);
+ mask &= ~__GFP_HIGHMEM;
+ mask |= __GFP_DMA32;
+ mapping_set_gfp_mask(obj->filp->f_mapping, mask);
- err = drm_gem_handle_create(file, &bo->gem, handle);
+ sgt = drm_gem_shmem_get_pages_sgt(obj);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto out;
+ }
+
+ err = drm_gem_handle_create(file, obj, handle);
+out:
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(&bo->gem);
+ drm_gem_object_put_unlocked(obj);
return err;
}
-void lima_gem_free_object(struct drm_gem_object *obj)
+static void lima_gem_free_object(struct drm_gem_object *obj)
{
struct lima_bo *bo = to_lima_bo(obj);
if (!list_empty(&bo->va))
dev_err(obj->dev->dev, "lima gem free bo still has va\n");
- lima_bo_destroy(bo);
+ drm_gem_shmem_free_object(obj);
}
-int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
+static int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
{
struct lima_bo *bo = to_lima_bo(obj);
struct lima_drm_priv *priv = to_lima_drm_priv(file);
@@ -55,7 +70,7 @@ int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
return lima_vm_bo_add(vm, bo, true);
}
-void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
+static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
{
struct lima_bo *bo = to_lima_bo(obj);
struct lima_drm_priv *priv = to_lima_drm_priv(file);
@@ -64,13 +79,41 @@ void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
lima_vm_bo_del(vm, bo);
}
+static const struct drm_gem_object_funcs lima_gem_funcs = {
+ .free = lima_gem_free_object,
+ .open = lima_gem_object_open,
+ .close = lima_gem_object_close,
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = drm_gem_shmem_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .mmap = drm_gem_shmem_mmap,
+};
+
+struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
+{
+ struct lima_bo *bo;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return NULL;
+
+ mutex_init(&bo->lock);
+ INIT_LIST_HEAD(&bo->va);
+
+ bo->base.base.funcs = &lima_gem_funcs;
+
+ return &bo->base.base;
+}
+
int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
{
struct drm_gem_object *obj;
struct lima_bo *bo;
struct lima_drm_priv *priv = to_lima_drm_priv(file);
struct lima_vm *vm = priv->vm;
- int err;
obj = drm_gem_object_lookup(file, handle);
if (!obj)
@@ -80,53 +123,9 @@ int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
*va = lima_vm_get_va(vm, bo);
- err = drm_gem_create_mmap_offset(obj);
- if (!err)
- *offset = drm_vma_node_offset_addr(&obj->vma_node);
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
drm_gem_object_put_unlocked(obj);
- return err;
-}
-
-static vm_fault_t lima_gem_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_gem_object *obj = vma->vm_private_data;
- struct lima_bo *bo = to_lima_bo(obj);
- pfn_t pfn;
- pgoff_t pgoff;
-
- /* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
-
- return vmf_insert_mixed(vma, vmf->address, pfn);
-}
-
-const struct vm_operations_struct lima_gem_vm_ops = {
- .fault = lima_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
-void lima_set_vma_flags(struct vm_area_struct *vma)
-{
- pgprot_t prot = vm_get_page_prot(vma->vm_flags);
-
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_page_prot = pgprot_writecombine(prot);
-}
-
-int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- lima_set_vma_flags(vma);
return 0;
}
@@ -136,7 +135,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
int err = 0;
if (!write) {
- err = dma_resv_reserve_shared(bo->gem.resv, 1);
+ err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
if (err)
return err;
}
@@ -145,62 +144,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
if (explicit)
return 0;
- return drm_gem_fence_array_add_implicit(&task->deps, &bo->gem, write);
-}
-
-static int lima_gem_lock_bos(struct lima_bo **bos, u32 nr_bos,
- struct ww_acquire_ctx *ctx)
-{
- int i, ret = 0, contended, slow_locked = -1;
-
- ww_acquire_init(ctx, &reservation_ww_class);
-
-retry:
- for (i = 0; i < nr_bos; i++) {
- if (i == slow_locked) {
- slow_locked = -1;
- continue;
- }
-
- ret = ww_mutex_lock_interruptible(&bos[i]->gem.resv->lock, ctx);
- if (ret < 0) {
- contended = i;
- goto err;
- }
- }
-
- ww_acquire_done(ctx);
- return 0;
-
-err:
- for (i--; i >= 0; i--)
- ww_mutex_unlock(&bos[i]->gem.resv->lock);
-
- if (slow_locked >= 0)
- ww_mutex_unlock(&bos[slow_locked]->gem.resv->lock);
-
- if (ret == -EDEADLK) {
- /* we lost out in a seqno race, lock and retry.. */
- ret = ww_mutex_lock_slow_interruptible(
- &bos[contended]->gem.resv->lock, ctx);
- if (!ret) {
- slow_locked = contended;
- goto retry;
- }
- }
- ww_acquire_fini(ctx);
-
- return ret;
-}
-
-static void lima_gem_unlock_bos(struct lima_bo **bos, u32 nr_bos,
- struct ww_acquire_ctx *ctx)
-{
- int i;
-
- for (i = 0; i < nr_bos; i++)
- ww_mutex_unlock(&bos[i]->gem.resv->lock);
- ww_acquire_fini(ctx);
+ return drm_gem_fence_array_add_implicit(&task->deps, &bo->base.base, write);
}
static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
@@ -268,7 +212,8 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
bos[i] = bo;
}
- err = lima_gem_lock_bos(bos, submit->nr_bos, &ctx);
+ err = drm_gem_lock_reservations((struct drm_gem_object **)bos,
+ submit->nr_bos, &ctx);
if (err)
goto err_out0;
@@ -296,15 +241,16 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
- dma_resv_add_excl_fence(bos[i]->gem.resv, fence);
+ dma_resv_add_excl_fence(lima_bo_resv(bos[i]), fence);
else
- dma_resv_add_shared_fence(bos[i]->gem.resv, fence);
+ dma_resv_add_shared_fence(lima_bo_resv(bos[i]), fence);
}
- lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
+ drm_gem_unlock_reservations((struct drm_gem_object **)bos,
+ submit->nr_bos, &ctx);
for (i = 0; i < submit->nr_bos; i++)
- drm_gem_object_put_unlocked(&bos[i]->gem);
+ drm_gem_object_put_unlocked(&bos[i]->base.base);
if (out_sync) {
drm_syncobj_replace_fence(out_sync, fence);
@@ -318,13 +264,14 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
err_out2:
lima_sched_task_fini(submit->task);
err_out1:
- lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
+ drm_gem_unlock_reservations((struct drm_gem_object **)bos,
+ submit->nr_bos, &ctx);
err_out0:
for (i = 0; i < submit->nr_bos; i++) {
if (!bos[i])
break;
lima_vm_bo_del(vm, bos[i]);
- drm_gem_object_put_unlocked(&bos[i]->gem);
+ drm_gem_object_put_unlocked(&bos[i]->base.base);
}
if (out_sync)
drm_syncobj_put(out_sync);
diff --git a/drivers/gpu/drm/lima/lima_gem.h b/drivers/gpu/drm/lima/lima_gem.h
index 556111a01135..1800feb3e47f 100644
--- a/drivers/gpu/drm/lima/lima_gem.h
+++ b/drivers/gpu/drm/lima/lima_gem.h
@@ -4,19 +4,37 @@
#ifndef __LIMA_GEM_H__
#define __LIMA_GEM_H__
-struct lima_bo;
+#include <drm/drm_gem_shmem_helper.h>
+
struct lima_submit;
-extern const struct vm_operations_struct lima_gem_vm_ops;
+struct lima_bo {
+ struct drm_gem_shmem_object base;
+
+ struct mutex lock;
+ struct list_head va;
+};
+
+static inline struct lima_bo *
+to_lima_bo(struct drm_gem_object *obj)
+{
+ return container_of(to_drm_gem_shmem_obj(obj), struct lima_bo, base);
+}
+
+static inline size_t lima_bo_size(struct lima_bo *bo)
+{
+ return bo->base.base.size;
+}
+
+static inline struct dma_resv *lima_bo_resv(struct lima_bo *bo)
+{
+ return bo->base.base.resv;
+}
-struct lima_bo *lima_gem_create_bo(struct drm_device *dev, u32 size, u32 flags);
+struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size);
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle);
-void lima_gem_free_object(struct drm_gem_object *obj);
-int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file);
-void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file);
int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset);
-int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int lima_gem_submit(struct drm_file *file, struct lima_submit *submit);
int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns);
diff --git a/drivers/gpu/drm/lima/lima_gem_prime.c b/drivers/gpu/drm/lima/lima_gem_prime.c
deleted file mode 100644
index e3eb251e0a12..000000000000
--- a/drivers/gpu/drm/lima/lima_gem_prime.c
+++ /dev/null
@@ -1,46 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
-
-#include <linux/dma-buf.h>
-#include <drm/drm_prime.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-
-#include "lima_device.h"
-#include "lima_object.h"
-#include "lima_gem.h"
-#include "lima_gem_prime.h"
-
-struct drm_gem_object *lima_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *sgt)
-{
- struct lima_device *ldev = to_lima_dev(dev);
- struct lima_bo *bo;
-
- bo = lima_bo_create(ldev, attach->dmabuf->size, 0, sgt);
- if (IS_ERR(bo))
- return ERR_CAST(bo);
-
- return &bo->gem;
-}
-
-struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct lima_bo *bo = to_lima_bo(obj);
- int npages = obj->size >> PAGE_SHIFT;
-
- return drm_prime_pages_to_sg(bo->pages, npages);
-}
-
-int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap_obj(obj, obj->size, vma);
- if (ret)
- return ret;
-
- lima_set_vma_flags(vma);
- return 0;
-}
diff --git a/drivers/gpu/drm/lima/lima_gem_prime.h b/drivers/gpu/drm/lima/lima_gem_prime.h
deleted file mode 100644
index 34b4d35c21e3..000000000000
--- a/drivers/gpu/drm/lima/lima_gem_prime.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
-
-#ifndef __LIMA_GEM_PRIME_H__
-#define __LIMA_GEM_PRIME_H__
-
-struct drm_gem_object *lima_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *sgt);
-struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj);
-int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-
-#endif
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
index 8e1651d6a61f..97ec09dee572 100644
--- a/drivers/gpu/drm/lima/lima_mmu.c
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -8,7 +8,6 @@
#include "lima_device.h"
#include "lima_mmu.h"
#include "lima_vm.h"
-#include "lima_object.h"
#include "lima_regs.h"
#define mmu_write(reg, data) writel(data, ip->iomem + reg)
diff --git a/drivers/gpu/drm/lima/lima_object.c b/drivers/gpu/drm/lima/lima_object.c
deleted file mode 100644
index 87123b1d083c..000000000000
--- a/drivers/gpu/drm/lima/lima_object.c
+++ /dev/null
@@ -1,119 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
-
-#include <drm/drm_prime.h>
-#include <linux/pagemap.h>
-#include <linux/dma-mapping.h>
-
-#include "lima_object.h"
-
-void lima_bo_destroy(struct lima_bo *bo)
-{
- if (bo->sgt) {
- kfree(bo->pages);
- drm_prime_gem_destroy(&bo->gem, bo->sgt);
- } else {
- if (bo->pages_dma_addr) {
- int i, npages = bo->gem.size >> PAGE_SHIFT;
-
- for (i = 0; i < npages; i++) {
- if (bo->pages_dma_addr[i])
- dma_unmap_page(bo->gem.dev->dev,
- bo->pages_dma_addr[i],
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- }
- }
-
- if (bo->pages)
- drm_gem_put_pages(&bo->gem, bo->pages, true, true);
- }
-
- kfree(bo->pages_dma_addr);
- drm_gem_object_release(&bo->gem);
- kfree(bo);
-}
-
-static struct lima_bo *lima_bo_create_struct(struct lima_device *dev, u32 size, u32 flags)
-{
- struct lima_bo *bo;
- int err;
-
- size = PAGE_ALIGN(size);
-
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (!bo)
- return ERR_PTR(-ENOMEM);
-
- mutex_init(&bo->lock);
- INIT_LIST_HEAD(&bo->va);
-
- err = drm_gem_object_init(dev->ddev, &bo->gem, size);
- if (err) {
- kfree(bo);
- return ERR_PTR(err);
- }
-
- return bo;
-}
-
-struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
- u32 flags, struct sg_table *sgt)
-{
- int i, err;
- size_t npages;
- struct lima_bo *bo, *ret;
-
- bo = lima_bo_create_struct(dev, size, flags);
- if (IS_ERR(bo))
- return bo;
-
- npages = bo->gem.size >> PAGE_SHIFT;
-
- bo->pages_dma_addr = kcalloc(npages, sizeof(dma_addr_t), GFP_KERNEL);
- if (!bo->pages_dma_addr) {
- ret = ERR_PTR(-ENOMEM);
- goto err_out;
- }
-
- if (sgt) {
- bo->sgt = sgt;
-
- bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
- if (!bo->pages) {
- ret = ERR_PTR(-ENOMEM);
- goto err_out;
- }
-
- err = drm_prime_sg_to_page_addr_arrays(
- sgt, bo->pages, bo->pages_dma_addr, npages);
- if (err) {
- ret = ERR_PTR(err);
- goto err_out;
- }
- } else {
- mapping_set_gfp_mask(bo->gem.filp->f_mapping, GFP_DMA32);
- bo->pages = drm_gem_get_pages(&bo->gem);
- if (IS_ERR(bo->pages)) {
- ret = ERR_CAST(bo->pages);
- bo->pages = NULL;
- goto err_out;
- }
-
- for (i = 0; i < npages; i++) {
- dma_addr_t addr = dma_map_page(dev->dev, bo->pages[i], 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev->dev, addr)) {
- ret = ERR_PTR(-EFAULT);
- goto err_out;
- }
- bo->pages_dma_addr[i] = addr;
- }
-
- }
-
- return bo;
-
-err_out:
- lima_bo_destroy(bo);
- return ret;
-}
diff --git a/drivers/gpu/drm/lima/lima_object.h b/drivers/gpu/drm/lima/lima_object.h
deleted file mode 100644
index 31ca2d8dc0a1..000000000000
--- a/drivers/gpu/drm/lima/lima_object.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
-
-#ifndef __LIMA_OBJECT_H__
-#define __LIMA_OBJECT_H__
-
-#include <drm/drm_gem.h>
-
-#include "lima_device.h"
-
-struct lima_bo {
- struct drm_gem_object gem;
-
- struct page **pages;
- dma_addr_t *pages_dma_addr;
- struct sg_table *sgt;
- void *vaddr;
-
- struct mutex lock;
- struct list_head va;
-};
-
-static inline struct lima_bo *
-to_lima_bo(struct drm_gem_object *obj)
-{
- return container_of(obj, struct lima_bo, gem);
-}
-
-struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
- u32 flags, struct sg_table *sgt);
-void lima_bo_destroy(struct lima_bo *bo);
-void *lima_bo_vmap(struct lima_bo *bo);
-void lima_bo_vunmap(struct lima_bo *bo);
-
-#endif
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 4127cacac454..f522c5f99729 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -10,7 +10,7 @@
#include "lima_vm.h"
#include "lima_mmu.h"
#include "lima_l2_cache.h"
-#include "lima_object.h"
+#include "lima_gem.h"
struct lima_fence {
struct dma_fence base;
@@ -117,7 +117,7 @@ int lima_sched_task_init(struct lima_sched_task *task,
return -ENOMEM;
for (i = 0; i < num_bos; i++)
- drm_gem_object_get(&bos[i]->gem);
+ drm_gem_object_get(&bos[i]->base.base);
err = drm_sched_job_init(&task->base, &context->base, vm);
if (err) {
@@ -148,7 +148,7 @@ void lima_sched_task_fini(struct lima_sched_task *task)
if (task->bos) {
for (i = 0; i < task->num_bos; i++)
- drm_gem_object_put_unlocked(&task->bos[i]->gem);
+ drm_gem_object_put_unlocked(&task->bos[i]->base.base);
kfree(task->bos);
}
diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c
index 19e88ca16527..840e2350d872 100644
--- a/drivers/gpu/drm/lima/lima_vm.c
+++ b/drivers/gpu/drm/lima/lima_vm.c
@@ -6,7 +6,7 @@
#include "lima_device.h"
#include "lima_vm.h"
-#include "lima_object.h"
+#include "lima_gem.h"
#include "lima_regs.h"
struct lima_bo_va {
@@ -32,7 +32,7 @@ struct lima_bo_va {
#define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)
-static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end)
+static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end)
{
u32 addr;
@@ -44,41 +44,32 @@ static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end)
}
}
-static int lima_vm_map_page_table(struct lima_vm *vm, dma_addr_t *dma,
- u32 start, u32 end)
+static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va)
{
- u64 addr;
- int i = 0;
-
- for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
- u32 pbe = LIMA_PBE(addr);
- u32 bte = LIMA_BTE(addr);
-
- if (!vm->bts[pbe].cpu) {
- dma_addr_t pts;
- u32 *pd;
- int j;
-
- vm->bts[pbe].cpu = dma_alloc_wc(
- vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
- &vm->bts[pbe].dma, GFP_KERNEL | __GFP_ZERO);
- if (!vm->bts[pbe].cpu) {
- if (addr != start)
- lima_vm_unmap_page_table(vm, start, addr - 1);
- return -ENOMEM;
- }
-
- pts = vm->bts[pbe].dma;
- pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
- for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
- pd[j] = pts | LIMA_VM_FLAG_PRESENT;
- pts += LIMA_PAGE_SIZE;
- }
+ u32 pbe = LIMA_PBE(va);
+ u32 bte = LIMA_BTE(va);
+
+ if (!vm->bts[pbe].cpu) {
+ dma_addr_t pts;
+ u32 *pd;
+ int j;
+
+ vm->bts[pbe].cpu = dma_alloc_wc(
+ vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
+ &vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+ if (!vm->bts[pbe].cpu)
+ return -ENOMEM;
+
+ pts = vm->bts[pbe].dma;
+ pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
+ for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
+ pd[j] = pts | LIMA_VM_FLAG_PRESENT;
+ pts += LIMA_PAGE_SIZE;
}
-
- vm->bts[pbe].cpu[bte] = dma[i++] | LIMA_VM_FLAGS_CACHE;
}
+ vm->bts[pbe].cpu[bte] = pa | LIMA_VM_FLAGS_CACHE;
+
return 0;
}
@@ -100,7 +91,8 @@ lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
{
struct lima_bo_va *bo_va;
- int err;
+ struct sg_dma_page_iter sg_iter;
+ int offset = 0, err;
mutex_lock(&bo->lock);
@@ -128,14 +120,18 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
mutex_lock(&vm->lock);
- err = drm_mm_insert_node(&vm->mm, &bo_va->node, bo->gem.size);
+ err = drm_mm_insert_node(&vm->mm, &bo_va->node, lima_bo_size(bo));
if (err)
goto err_out1;
- err = lima_vm_map_page_table(vm, bo->pages_dma_addr, bo_va->node.start,
- bo_va->node.start + bo_va->node.size - 1);
- if (err)
- goto err_out2;
+ for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, bo->base.sgt->nents, 0) {
+ err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
+ bo_va->node.start + offset);
+ if (err)
+ goto err_out2;
+
+ offset += PAGE_SIZE;
+ }
mutex_unlock(&vm->lock);
@@ -145,6 +141,8 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
return 0;
err_out2:
+ if (offset)
+ lima_vm_unmap_range(vm, bo_va->node.start, bo_va->node.start + offset - 1);
drm_mm_remove_node(&bo_va->node);
err_out1:
mutex_unlock(&vm->lock);
@@ -168,8 +166,8 @@ void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
mutex_lock(&vm->lock);
- lima_vm_unmap_page_table(vm, bo_va->node.start,
- bo_va->node.start + bo_va->node.size - 1);
+ lima_vm_unmap_range(vm, bo_va->node.start,
+ bo_va->node.start + bo_va->node.size - 1);
drm_mm_remove_node(&bo_va->node);
@@ -210,14 +208,13 @@ struct lima_vm *lima_vm_create(struct lima_device *dev)
kref_init(&vm->refcount);
vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma,
- GFP_KERNEL | __GFP_ZERO);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!vm->pd.cpu)
goto err_out0;
if (dev->dlbu_cpu) {
- int err = lima_vm_map_page_table(
- vm, &dev->dlbu_dma, LIMA_VA_RESERVE_DLBU,
- LIMA_VA_RESERVE_DLBU + LIMA_PAGE_SIZE - 1);
+ int err = lima_vm_map_page(
+ vm, dev->dlbu_dma, LIMA_VA_RESERVE_DLBU);
if (err)
goto err_out1;
}
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 82ae49c64221..8067a4be8311 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -12,6 +12,8 @@ mediatek-drm-y := mtk_disp_color.o \
mtk_drm_plane.o \
mtk_dsi.o \
mtk_mipi_tx.o \
+ mtk_mt8173_mipi_tx.o \
+ mtk_mt8183_mipi_tx.o \
mtk_dpi.o
obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 21851756c579..14878ebf59d7 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -19,6 +19,8 @@
#define DISP_REG_OVL_EN 0x000c
#define DISP_REG_OVL_RST 0x0014
#define DISP_REG_OVL_ROI_SIZE 0x0020
+#define DISP_REG_OVL_DATAPATH_CON 0x0024
+#define OVL_BGCLR_SEL_IN BIT(2)
#define DISP_REG_OVL_ROI_BGCLR 0x0028
#define DISP_REG_OVL_SRC_CON 0x002c
#define DISP_REG_OVL_CON(n) (0x0030 + 0x20 * (n))
@@ -31,7 +33,9 @@
#define DISP_REG_OVL_ADDR_MT8173 0x0f40
#define DISP_REG_OVL_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n))
-#define OVL_RDMA_MEM_GMC 0x40402020
+#define GMC_THRESHOLD_BITS 16
+#define GMC_THRESHOLD_HIGH ((1 << GMC_THRESHOLD_BITS) / 4)
+#define GMC_THRESHOLD_LOW ((1 << GMC_THRESHOLD_BITS) / 8)
#define OVL_CON_BYTE_SWAP BIT(24)
#define OVL_CON_MTX_YUV_TO_RGB (6 << 16)
@@ -49,6 +53,8 @@
struct mtk_disp_ovl_data {
unsigned int addr;
+ unsigned int gmc_bits;
+ unsigned int layer_nr;
bool fmt_rgb565_is_0;
};
@@ -126,15 +132,31 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
{
- return 4;
+ struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+
+ return ovl->data->layer_nr;
}
static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
{
unsigned int reg;
+ unsigned int gmc_thrshd_l;
+ unsigned int gmc_thrshd_h;
+ unsigned int gmc_value;
+ struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
writel(0x1, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
- writel(OVL_RDMA_MEM_GMC, comp->regs + DISP_REG_OVL_RDMA_GMC(idx));
+
+ gmc_thrshd_l = GMC_THRESHOLD_LOW >>
+ (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
+ gmc_thrshd_h = GMC_THRESHOLD_HIGH >>
+ (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
+ if (ovl->data->gmc_bits == 10)
+ gmc_value = gmc_thrshd_h | gmc_thrshd_h << 16;
+ else
+ gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 |
+ gmc_thrshd_h << 16 | gmc_thrshd_h << 24;
+ writel(gmc_value, comp->regs + DISP_REG_OVL_RDMA_GMC(idx));
reg = readl(comp->regs + DISP_REG_OVL_SRC_CON);
reg = reg | BIT(idx);
@@ -217,6 +239,24 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
mtk_ovl_layer_on(comp, idx);
}
+static void mtk_ovl_bgclr_in_on(struct mtk_ddp_comp *comp)
+{
+ unsigned int reg;
+
+ reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON);
+ reg = reg | OVL_BGCLR_SEL_IN;
+ writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON);
+}
+
+static void mtk_ovl_bgclr_in_off(struct mtk_ddp_comp *comp)
+{
+ unsigned int reg;
+
+ reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON);
+ reg = reg & ~OVL_BGCLR_SEL_IN;
+ writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON);
+}
+
static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
.config = mtk_ovl_config,
.start = mtk_ovl_start,
@@ -227,6 +267,8 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
.layer_on = mtk_ovl_layer_on,
.layer_off = mtk_ovl_layer_off,
.layer_config = mtk_ovl_layer_config,
+ .bgclr_in_on = mtk_ovl_bgclr_in_on,
+ .bgclr_in_off = mtk_ovl_bgclr_in_off,
};
static int mtk_disp_ovl_bind(struct device *dev, struct device *master,
@@ -276,7 +318,12 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL);
+ priv->data = of_device_get_match_data(dev);
+
+ comp_id = mtk_ddp_comp_get_id(dev->of_node,
+ priv->data->layer_nr == 4 ?
+ MTK_DISP_OVL :
+ MTK_DISP_OVL_2L);
if (comp_id < 0) {
dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
return comp_id;
@@ -289,8 +336,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
return ret;
}
- priv->data = of_device_get_match_data(dev);
-
platform_set_drvdata(pdev, priv);
ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
@@ -316,11 +361,15 @@ static int mtk_disp_ovl_remove(struct platform_device *pdev)
static const struct mtk_disp_ovl_data mt2701_ovl_driver_data = {
.addr = DISP_REG_OVL_ADDR_MT2701,
+ .gmc_bits = 8,
+ .layer_nr = 4,
.fmt_rgb565_is_0 = false,
};
static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = {
.addr = DISP_REG_OVL_ADDR_MT8173,
+ .gmc_bits = 8,
+ .layer_nr = 4,
.fmt_rgb565_is_0 = true,
};
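
For the 8-bit GMC case the parameterized thresholds reduce to the old hard-coded OVL_RDMA_MEM_GMC value (0x40402020), which is a quick way to sanity-check the refactor. A standalone sketch of the packing, compiled outside the kernel for illustration only:

#include <stdio.h>

#define GMC_THRESHOLD_BITS	16
#define GMC_THRESHOLD_HIGH	((1 << GMC_THRESHOLD_BITS) / 4)
#define GMC_THRESHOLD_LOW	((1 << GMC_THRESHOLD_BITS) / 8)

static unsigned int gmc_value(unsigned int gmc_bits)
{
	unsigned int l = GMC_THRESHOLD_LOW >> (GMC_THRESHOLD_BITS - gmc_bits);
	unsigned int h = GMC_THRESHOLD_HIGH >> (GMC_THRESHOLD_BITS - gmc_bits);

	if (gmc_bits == 10)
		return h | h << 16;
	return l | l << 8 | h << 16 | h << 24;
}

int main(void)
{
	/* 8-bit parts reproduce the old OVL_RDMA_MEM_GMC constant. */
	printf("gmc_bits=8:  0x%08x\n", gmc_value(8));	/* 0x40402020 */
	printf("gmc_bits=10: 0x%08x\n", gmc_value(10));
	return 0;
}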
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 34a731755791..b841d3706d8b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -272,6 +272,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];
+ if (i == 1)
+ mtk_ddp_comp_bgclr_in_on(comp);
+
mtk_ddp_comp_config(comp, width, height, vrefresh, bpc);
mtk_ddp_comp_start(comp);
}
@@ -280,9 +283,18 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
+ unsigned int comp_layer_nr = mtk_ddp_comp_layer_nr(comp);
+ unsigned int local_layer;
plane_state = to_mtk_plane_state(plane->state);
- mtk_ddp_comp_layer_config(mtk_crtc->ddp_comp[0], i,
+
+ if (i >= comp_layer_nr) {
+ comp = mtk_crtc->ddp_comp[1];
+ local_layer = i - comp_layer_nr;
+ } else
+ local_layer = i;
+ mtk_ddp_comp_layer_config(comp, local_layer,
plane_state);
}
@@ -301,8 +313,12 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
int i;
DRM_DEBUG_DRIVER("%s\n", __func__);
- for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
+ if (i == 1)
+ mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]);
+ }
+
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
@@ -327,6 +343,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
unsigned int i;
+ unsigned int comp_layer_nr = mtk_ddp_comp_layer_nr(comp);
+ unsigned int local_layer;
/*
* TODO: instead of updating the registers here, we should prepare
@@ -349,7 +367,14 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
plane_state = to_mtk_plane_state(plane->state);
if (plane_state->pending.config) {
- mtk_ddp_comp_layer_config(comp, i, plane_state);
+ if (i >= comp_layer_nr) {
+ comp = mtk_crtc->ddp_comp[1];
+ local_layer = i - comp_layer_nr;
+ } else
+ local_layer = i;
+
+ mtk_ddp_comp_layer_config(comp, local_layer,
+ plane_state);
plane_state->pending.config = false;
}
}
@@ -582,6 +607,12 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
}
mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
+ if (mtk_crtc->ddp_comp_nr > 1) {
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[1];
+
+ if (comp->funcs->bgclr_in_on)
+ mtk_crtc->layer_nr += mtk_ddp_comp_layer_nr(comp);
+ }
mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr,
sizeof(struct drm_plane),
GFP_KERNEL);
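
With two cascaded OVL engines, a global plane index has to be split into a component and a local layer index, as the crtc hunks above do in both the init and config paths. A standalone sketch with made-up layer counts (a 4-layer OVL0 feeding a 2-layer OVL_2L):

#include <stdio.h>

/* Illustrative only: mirrors the index split in mtk_crtc_ddp_hw_init(). */
int main(void)
{
	unsigned int comp_layer_nr = 4;	/* layers on ddp_comp[0] */
	unsigned int total = 6;		/* plus 2 layers on ddp_comp[1] */
	unsigned int i, comp, local_layer;

	for (i = 0; i < total; i++) {
		if (i >= comp_layer_nr) {
			comp = 1;
			local_layer = i - comp_layer_nr;
		} else {
			comp = 0;
			local_layer = i;
		}
		printf("plane %u -> ddp_comp[%u] layer %u\n",
		       i, comp, local_layer);
	}
	return 0;
}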
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 8106a71a7404..13035c906035 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -33,12 +33,15 @@
#define DISP_REG_CONFIG_DSI_SEL 0x050
#define DISP_REG_CONFIG_DPI_SEL 0x064
-#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n))
-#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n))
-#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n))
-#define DISP_REG_MUTEX_MOD(n) (0x2c + 0x20 * (n))
-#define DISP_REG_MUTEX_SOF(n) (0x30 + 0x20 * (n))
-#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n))
+#define MT2701_DISP_MUTEX0_MOD0 0x2c
+#define MT2701_DISP_MUTEX0_SOF0 0x30
+
+#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n))
+#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n))
+#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD(mutex_mod_reg, n) (mutex_mod_reg + 0x20 * (n))
+#define DISP_REG_MUTEX_SOF(mutex_sof_reg, n) (mutex_sof_reg + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n))
#define INT_MUTEX BIT(1)
@@ -139,12 +142,30 @@ struct mtk_disp_mutex {
bool claimed;
};
+enum mtk_ddp_mutex_sof_id {
+ DDP_MUTEX_SOF_SINGLE_MODE,
+ DDP_MUTEX_SOF_DSI0,
+ DDP_MUTEX_SOF_DSI1,
+ DDP_MUTEX_SOF_DPI0,
+ DDP_MUTEX_SOF_DPI1,
+ DDP_MUTEX_SOF_DSI2,
+ DDP_MUTEX_SOF_DSI3,
+};
+
+struct mtk_ddp_data {
+ const unsigned int *mutex_mod;
+ const unsigned int *mutex_sof;
+ const unsigned int mutex_mod_reg;
+ const unsigned int mutex_sof_reg;
+ const bool no_clk;
+};
+
struct mtk_ddp {
struct device *dev;
struct clk *clk;
void __iomem *regs;
struct mtk_disp_mutex mutex[10];
- const unsigned int *mutex_mod;
+ const struct mtk_ddp_data *data;
};
static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
@@ -194,6 +215,37 @@ static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1,
};
+static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_DSI3 + 1] = {
+ [DDP_MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ [DDP_MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
+ [DDP_MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1,
+ [DDP_MUTEX_SOF_DPI0] = MUTEX_SOF_DPI0,
+ [DDP_MUTEX_SOF_DPI1] = MUTEX_SOF_DPI1,
+ [DDP_MUTEX_SOF_DSI2] = MUTEX_SOF_DSI2,
+ [DDP_MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3,
+};
+
+static const struct mtk_ddp_data mt2701_ddp_driver_data = {
+ .mutex_mod = mt2701_mutex_mod,
+ .mutex_sof = mt2712_mutex_sof,
+ .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+};
+
+static const struct mtk_ddp_data mt2712_ddp_driver_data = {
+ .mutex_mod = mt2712_mutex_mod,
+ .mutex_sof = mt2712_mutex_sof,
+ .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+};
+
+static const struct mtk_ddp_data mt8173_ddp_driver_data = {
+ .mutex_mod = mt8173_mutex_mod,
+ .mutex_sof = mt2712_mutex_sof,
+ .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+};
+
static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
enum mtk_ddp_comp_id next,
unsigned int *addr)
@@ -432,45 +484,49 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
mutex[mutex->id]);
unsigned int reg;
+ unsigned int sof_id;
unsigned int offset;
WARN_ON(&ddp->mutex[mutex->id] != mutex);
switch (id) {
case DDP_COMPONENT_DSI0:
- reg = MUTEX_SOF_DSI0;
+ sof_id = DDP_MUTEX_SOF_DSI0;
break;
case DDP_COMPONENT_DSI1:
- reg = MUTEX_SOF_DSI0;
+ sof_id = DDP_MUTEX_SOF_DSI0;
break;
case DDP_COMPONENT_DSI2:
- reg = MUTEX_SOF_DSI2;
+ sof_id = DDP_MUTEX_SOF_DSI2;
break;
case DDP_COMPONENT_DSI3:
- reg = MUTEX_SOF_DSI3;
+ sof_id = DDP_MUTEX_SOF_DSI3;
break;
case DDP_COMPONENT_DPI0:
- reg = MUTEX_SOF_DPI0;
+ sof_id = DDP_MUTEX_SOF_DPI0;
break;
case DDP_COMPONENT_DPI1:
- reg = MUTEX_SOF_DPI1;
+ sof_id = DDP_MUTEX_SOF_DPI1;
break;
default:
- if (ddp->mutex_mod[id] < 32) {
- offset = DISP_REG_MUTEX_MOD(mutex->id);
+ if (ddp->data->mutex_mod[id] < 32) {
+ offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg,
+ mutex->id);
reg = readl_relaxed(ddp->regs + offset);
- reg |= 1 << ddp->mutex_mod[id];
+ reg |= 1 << ddp->data->mutex_mod[id];
writel_relaxed(reg, ddp->regs + offset);
} else {
offset = DISP_REG_MUTEX_MOD2(mutex->id);
reg = readl_relaxed(ddp->regs + offset);
- reg |= 1 << (ddp->mutex_mod[id] - 32);
+ reg |= 1 << (ddp->data->mutex_mod[id] - 32);
writel_relaxed(reg, ddp->regs + offset);
}
return;
}
- writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_SOF(mutex->id));
+ writel_relaxed(ddp->data->mutex_sof[sof_id],
+ ddp->regs +
+ DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg, mutex->id));
}
void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
@@ -491,18 +547,21 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
case DDP_COMPONENT_DPI0:
case DDP_COMPONENT_DPI1:
writel_relaxed(MUTEX_SOF_SINGLE_MODE,
- ddp->regs + DISP_REG_MUTEX_SOF(mutex->id));
+ ddp->regs +
+ DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg,
+ mutex->id));
break;
default:
- if (ddp->mutex_mod[id] < 32) {
- offset = DISP_REG_MUTEX_MOD(mutex->id);
+ if (ddp->data->mutex_mod[id] < 32) {
+ offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg,
+ mutex->id);
reg = readl_relaxed(ddp->regs + offset);
- reg &= ~(1 << ddp->mutex_mod[id]);
+ reg &= ~(1 << ddp->data->mutex_mod[id]);
writel_relaxed(reg, ddp->regs + offset);
} else {
offset = DISP_REG_MUTEX_MOD2(mutex->id);
reg = readl_relaxed(ddp->regs + offset);
- reg &= ~(1 << (ddp->mutex_mod[id] - 32));
+ reg &= ~(1 << (ddp->data->mutex_mod[id] - 32));
writel_relaxed(reg, ddp->regs + offset);
}
break;
@@ -564,10 +623,14 @@ static int mtk_ddp_probe(struct platform_device *pdev)
for (i = 0; i < 10; i++)
ddp->mutex[i].id = i;
- ddp->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(ddp->clk)) {
- dev_err(dev, "Failed to get clock\n");
- return PTR_ERR(ddp->clk);
+ ddp->data = of_device_get_match_data(dev);
+
+ if (!ddp->data->no_clk) {
+ ddp->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ddp->clk)) {
+ dev_err(dev, "Failed to get clock\n");
+ return PTR_ERR(ddp->clk);
+ }
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -577,8 +640,6 @@ static int mtk_ddp_probe(struct platform_device *pdev)
return PTR_ERR(ddp->regs);
}
- ddp->mutex_mod = of_device_get_match_data(dev);
-
platform_set_drvdata(pdev, ddp);
return 0;
@@ -590,9 +651,12 @@ static int mtk_ddp_remove(struct platform_device *pdev)
}
static const struct of_device_id ddp_driver_dt_match[] = {
- { .compatible = "mediatek,mt2701-disp-mutex", .data = mt2701_mutex_mod},
- { .compatible = "mediatek,mt2712-disp-mutex", .data = mt2712_mutex_mod},
- { .compatible = "mediatek,mt8173-disp-mutex", .data = mt8173_mutex_mod},
+ { .compatible = "mediatek,mt2701-disp-mutex",
+ .data = &mt2701_ddp_driver_data},
+ { .compatible = "mediatek,mt2712-disp-mutex",
+ .data = &mt2712_ddp_driver_data},
+ { .compatible = "mediatek,mt8173-disp-mutex",
+ .data = &mt8173_ddp_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, ddp_driver_dt_match);
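
The mutex MOD/SOF registers are now reached through per-SoC base offsets carried in struct mtk_ddp_data instead of fixed macros. A standalone sketch of the offset arithmetic (the table entry is illustrative, not a full SoC description):

#include <stdio.h>

#define MT2701_DISP_MUTEX0_MOD0			0x2c
#define MT2701_DISP_MUTEX0_SOF0			0x30
#define DISP_REG_MUTEX_MOD(mutex_mod_reg, n)	((mutex_mod_reg) + 0x20 * (n))
#define DISP_REG_MUTEX_SOF(mutex_sof_reg, n)	((mutex_sof_reg) + 0x20 * (n))

/* Illustrative per-SoC data, mirroring the new struct mtk_ddp_data. */
struct ddp_data {
	unsigned int mutex_mod_reg;
	unsigned int mutex_sof_reg;
};

int main(void)
{
	const struct ddp_data soc = {
		.mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
		.mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
	};
	unsigned int n;

	for (n = 0; n < 3; n++)
		printf("mutex %u: MOD at 0x%02x, SOF at 0x%02x\n", n,
		       DISP_REG_MUTEX_MOD(soc.mutex_mod_reg, n),
		       DISP_REG_MUTEX_SOF(soc.mutex_sof_reg, n));
	return 0;
}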
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index efa85973e46b..7f21307cda75 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -33,6 +33,18 @@
#define DISP_AAL_EN 0x0000
#define DISP_AAL_SIZE 0x0030
+#define DISP_CCORR_EN 0x0000
+#define CCORR_EN BIT(0)
+#define DISP_CCORR_CFG 0x0020
+#define CCORR_RELAY_MODE BIT(0)
+#define DISP_CCORR_SIZE 0x0030
+
+#define DISP_DITHER_EN 0x0000
+#define DITHER_EN BIT(0)
+#define DISP_DITHER_CFG 0x0020
+#define DITHER_RELAY_MODE BIT(0)
+#define DISP_DITHER_SIZE 0x0030
+
#define DISP_GAMMA_EN 0x0000
#define DISP_GAMMA_CFG 0x0020
#define DISP_GAMMA_SIZE 0x0030
@@ -123,6 +135,42 @@ static void mtk_aal_stop(struct mtk_ddp_comp *comp)
writel_relaxed(0x0, comp->regs + DISP_AAL_EN);
}
+static void mtk_ccorr_config(struct mtk_ddp_comp *comp, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc)
+{
+ writel(h << 16 | w, comp->regs + DISP_CCORR_SIZE);
+ writel(CCORR_RELAY_MODE, comp->regs + DISP_CCORR_CFG);
+}
+
+static void mtk_ccorr_start(struct mtk_ddp_comp *comp)
+{
+ writel(CCORR_EN, comp->regs + DISP_CCORR_EN);
+}
+
+static void mtk_ccorr_stop(struct mtk_ddp_comp *comp)
+{
+ writel_relaxed(0x0, comp->regs + DISP_CCORR_EN);
+}
+
+static void mtk_dither_config(struct mtk_ddp_comp *comp, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc)
+{
+ writel(h << 16 | w, comp->regs + DISP_DITHER_SIZE);
+ writel(DITHER_RELAY_MODE, comp->regs + DISP_DITHER_CFG);
+}
+
+static void mtk_dither_start(struct mtk_ddp_comp *comp)
+{
+ writel(DITHER_EN, comp->regs + DISP_DITHER_EN);
+}
+
+static void mtk_dither_stop(struct mtk_ddp_comp *comp)
+{
+ writel_relaxed(0x0, comp->regs + DISP_DITHER_EN);
+}
+
static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc)
@@ -171,6 +219,18 @@ static const struct mtk_ddp_comp_funcs ddp_aal = {
.stop = mtk_aal_stop,
};
+static const struct mtk_ddp_comp_funcs ddp_ccorr = {
+ .config = mtk_ccorr_config,
+ .start = mtk_ccorr_start,
+ .stop = mtk_ccorr_stop,
+};
+
+static const struct mtk_ddp_comp_funcs ddp_dither = {
+ .config = mtk_dither_config,
+ .start = mtk_dither_start,
+ .stop = mtk_dither_stop,
+};
+
static const struct mtk_ddp_comp_funcs ddp_gamma = {
.gamma_set = mtk_gamma_set,
.config = mtk_gamma_config,
@@ -189,11 +249,14 @@ static const struct mtk_ddp_comp_funcs ddp_ufoe = {
static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = {
[MTK_DISP_OVL] = "ovl",
+ [MTK_DISP_OVL_2L] = "ovl_2l",
[MTK_DISP_RDMA] = "rdma",
[MTK_DISP_WDMA] = "wdma",
[MTK_DISP_COLOR] = "color",
+ [MTK_DISP_CCORR] = "ccorr",
[MTK_DISP_AAL] = "aal",
[MTK_DISP_GAMMA] = "gamma",
+ [MTK_DISP_DITHER] = "dither",
[MTK_DISP_UFOE] = "ufoe",
[MTK_DSI] = "dsi",
[MTK_DPI] = "dpi",
@@ -213,8 +276,10 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal },
[DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal },
[DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
+ [DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr },
[DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, NULL },
[DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, NULL },
+ [DDP_COMPONENT_DITHER] = { MTK_DISP_DITHER, 0, &ddp_dither },
[DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL },
[DDP_COMPONENT_DPI1] = { MTK_DPI, 1, NULL },
[DDP_COMPONENT_DSI0] = { MTK_DSI, 0, NULL },
@@ -226,6 +291,8 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od },
[DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, NULL },
[DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, NULL },
+ [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, NULL },
+ [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, NULL },
[DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL },
[DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL },
[DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL },
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 0ad287f427cc..26441f4d1ad3 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -17,9 +17,12 @@ struct drm_crtc_state;
enum mtk_ddp_comp_type {
MTK_DISP_OVL,
+ MTK_DISP_OVL_2L,
MTK_DISP_RDMA,
MTK_DISP_WDMA,
MTK_DISP_COLOR,
+ MTK_DISP_CCORR,
+ MTK_DISP_DITHER,
MTK_DISP_AAL,
MTK_DISP_GAMMA,
MTK_DISP_UFOE,
@@ -36,8 +39,10 @@ enum mtk_ddp_comp_id {
DDP_COMPONENT_AAL0,
DDP_COMPONENT_AAL1,
DDP_COMPONENT_BLS,
+ DDP_COMPONENT_CCORR,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_COLOR1,
+ DDP_COMPONENT_DITHER,
DDP_COMPONENT_DPI0,
DDP_COMPONENT_DPI1,
DDP_COMPONENT_DSI0,
@@ -48,6 +53,8 @@ enum mtk_ddp_comp_id {
DDP_COMPONENT_OD0,
DDP_COMPONENT_OD1,
DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_OVL_2L0,
+ DDP_COMPONENT_OVL_2L1,
DDP_COMPONENT_OVL1,
DDP_COMPONENT_PWM0,
DDP_COMPONENT_PWM1,
@@ -77,6 +84,8 @@ struct mtk_ddp_comp_funcs {
struct mtk_plane_state *state);
void (*gamma_set)(struct mtk_ddp_comp *comp,
struct drm_crtc_state *state);
+ void (*bgclr_in_on)(struct mtk_ddp_comp *comp);
+ void (*bgclr_in_off)(struct mtk_ddp_comp *comp);
};
struct mtk_ddp_comp {
@@ -158,6 +167,18 @@ static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp,
comp->funcs->gamma_set(comp, state);
}
+static inline void mtk_ddp_comp_bgclr_in_on(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->bgclr_in_on)
+ comp->funcs->bgclr_in_on(comp);
+}
+
+static inline void mtk_ddp_comp_bgclr_in_off(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->bgclr_in_off)
+ comp->funcs->bgclr_in_off(comp);
+}
+
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type);
int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 352b81a7a670..84d14213d992 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -547,6 +547,7 @@ static int mtk_drm_probe(struct platform_device *pdev)
*/
if (comp_type == MTK_DISP_COLOR ||
comp_type == MTK_DISP_OVL ||
+ comp_type == MTK_DISP_OVL_2L ||
comp_type == MTK_DISP_RDMA ||
comp_type == MTK_DSI ||
comp_type == MTK_DPI) {
@@ -669,8 +670,8 @@ static struct platform_driver * const mtk_drm_drivers[] = {
&mtk_disp_rdma_driver,
&mtk_dpi_driver,
&mtk_drm_platform_driver,
- &mtk_dsi_driver,
&mtk_mipi_tx_driver,
+ &mtk_dsi_driver,
};
static int __init mtk_drm_init(void)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index ca672f1d140d..b04a3c2b111e 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -271,7 +271,7 @@ void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
pgprot_writecombine(PAGE_KERNEL));
out:
- kfree((void *)sgt);
+ kfree(sgt);
return mtk_gem->kvaddr;
}
@@ -285,5 +285,5 @@ void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
vunmap(vaddr);
mtk_gem->kvaddr = 0;
- kfree((void *)mtk_gem->pages);
+ kfree(mtk_gem->pages);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index a413f5ff442d..e9931bbbe846 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -40,6 +40,7 @@
#define DSI_CON_CTRL 0x10
#define DSI_RESET BIT(0)
#define DSI_EN BIT(1)
+#define DPHY_RESET BIT(2)
#define DSI_MODE_CTRL 0x14
#define MODE (3)
@@ -73,6 +74,7 @@
#define DSI_VBP_NL 0x24
#define DSI_VFP_NL 0x28
#define DSI_VACT_NL 0x2C
+#define DSI_SIZE_CON 0x38
#define DSI_HSA_WC 0x50
#define DSI_HBP_WC 0x54
#define DSI_HFP_WC 0x58
@@ -126,7 +128,10 @@
#define VM_CMD_EN BIT(0)
#define TS_VFP_EN BIT(5)
-#define DSI_CMDQ0 0x180
+#define DSI_SHADOW_DEBUG 0x190U
+#define FORCE_COMMIT BIT(0)
+#define BYPASS_SHADOW BIT(1)
+
#define CONFIG (0xff << 0)
#define SHORT_PACKET 0
#define LONG_PACKET 2
@@ -135,12 +140,6 @@
#define DATA_0 (0xff << 16)
#define DATA_1 (0xff << 24)
-#define T_LPX 5
-#define T_HS_PREP 6
-#define T_HS_TRAIL 8
-#define T_HS_EXIT 7
-#define T_HS_ZERO 10
-
#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))
#define MTK_DSI_HOST_IS_READ(type) \
@@ -149,8 +148,33 @@
(type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
(type == MIPI_DSI_DCS_READ))
+struct mtk_phy_timing {
+ u32 lpx;
+ u32 da_hs_prepare;
+ u32 da_hs_zero;
+ u32 da_hs_trail;
+
+ u32 ta_go;
+ u32 ta_sure;
+ u32 ta_get;
+ u32 da_hs_exit;
+
+ u32 clk_hs_zero;
+ u32 clk_hs_trail;
+
+ u32 clk_hs_prepare;
+ u32 clk_hs_post;
+ u32 clk_hs_exit;
+};
+
struct phy;
+struct mtk_dsi_driver_data {
+ const u32 reg_cmdq_off;
+ bool has_shadow_ctl;
+ bool has_size_ctl;
+};
+
struct mtk_dsi {
struct mtk_ddp_comp ddp_comp;
struct device *dev;
@@ -173,10 +197,12 @@ struct mtk_dsi {
enum mipi_dsi_pixel_format format;
unsigned int lanes;
struct videomode vm;
+ struct mtk_phy_timing phy_timing;
int refcount;
bool enabled;
u32 irq_data;
wait_queue_head_t irq_wait_queue;
+ const struct mtk_dsi_driver_data *driver_data;
};
static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e)
@@ -205,17 +231,36 @@ static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
u32 timcon0, timcon1, timcon2, timcon3;
u32 ui, cycle_time;
+ struct mtk_phy_timing *timing = &dsi->phy_timing;
+
+ ui = DIV_ROUND_UP(1000000000, dsi->data_rate);
+ cycle_time = div_u64(8000000000ULL, dsi->data_rate);
+
+ timing->lpx = NS_TO_CYCLE(60, cycle_time);
+ timing->da_hs_prepare = NS_TO_CYCLE(50 + 5 * ui, cycle_time);
+ timing->da_hs_zero = NS_TO_CYCLE(110 + 6 * ui, cycle_time);
+ timing->da_hs_trail = NS_TO_CYCLE(77 + 4 * ui, cycle_time);
+
+ timing->ta_go = 4 * timing->lpx;
+ timing->ta_sure = 3 * timing->lpx / 2;
+ timing->ta_get = 5 * timing->lpx;
+ timing->da_hs_exit = 2 * timing->lpx;
- ui = 1000 / dsi->data_rate + 0x01;
- cycle_time = 8000 / dsi->data_rate + 0x01;
+ timing->clk_hs_zero = NS_TO_CYCLE(336, cycle_time);
+ timing->clk_hs_trail = NS_TO_CYCLE(100, cycle_time) + 10;
- timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24;
- timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 |
- T_HS_EXIT << 24;
- timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) |
- (NS_TO_CYCLE(0x150, cycle_time) << 16);
- timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 |
- NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8;
+ timing->clk_hs_prepare = NS_TO_CYCLE(64, cycle_time);
+ timing->clk_hs_post = NS_TO_CYCLE(80 + 52 * ui, cycle_time);
+ timing->clk_hs_exit = 2 * timing->lpx;
+
+ timcon0 = timing->lpx | timing->da_hs_prepare << 8 |
+ timing->da_hs_zero << 16 | timing->da_hs_trail << 24;
+ timcon1 = timing->ta_go | timing->ta_sure << 8 |
+ timing->ta_get << 16 | timing->da_hs_exit << 24;
+ timcon2 = 1 << 8 | timing->clk_hs_zero << 16 |
+ timing->clk_hs_trail << 24;
+ timcon3 = timing->clk_hs_prepare | timing->clk_hs_post << 8 |
+ timing->clk_hs_exit << 16;
writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
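
The replacement timing code derives each D-PHY interval from the lane rate instead of the old fixed T_* constants. A standalone worked example for a hypothetical 1 Gbps lane rate; data_rate is in Hz, so ui is ns per bit and cycle_time is ns per byte clock:

#include <stdio.h>

#define NS_TO_CYCLE(n, c)	((n) / (c) + (((n) % (c)) ? 1 : 0))

int main(void)
{
	unsigned long long data_rate = 1000000000ULL;	/* 1 Gbps per lane */
	unsigned int ui = (1000000000ULL + data_rate - 1) / data_rate;
	unsigned int cycle_time = 8000000000ULL / data_rate;
	unsigned int lpx = NS_TO_CYCLE(60, cycle_time);

	printf("ui=%u ns, cycle_time=%u ns, lpx=%u\n", ui, cycle_time, lpx);
	printf("da_hs_prepare=%u\n", NS_TO_CYCLE(50 + 5 * ui, cycle_time));	/* 7 */
	printf("da_hs_zero=%u\n", NS_TO_CYCLE(110 + 6 * ui, cycle_time));	/* 15 */
	printf("da_hs_trail=%u\n", NS_TO_CYCLE(77 + 4 * ui, cycle_time));	/* 11 */
	printf("da_hs_exit=%u\n", 2 * lpx);					/* 16 */
	return 0;
}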
@@ -239,6 +284,12 @@ static void mtk_dsi_reset_engine(struct mtk_dsi *dsi)
mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0);
}
+static void mtk_dsi_reset_dphy(struct mtk_dsi *dsi)
+{
+ mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, DPHY_RESET);
+ mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, 0);
+}
+
static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
@@ -402,7 +453,8 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
u32 horizontal_sync_active_byte;
u32 horizontal_backporch_byte;
u32 horizontal_frontporch_byte;
- u32 dsi_tmp_buf_bpp;
+ u32 dsi_tmp_buf_bpp, data_phy_cycles;
+ struct mtk_phy_timing *timing = &dsi->phy_timing;
struct videomode *vm = &dsi->vm;
@@ -416,6 +468,10 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
writel(vm->vactive, dsi->regs + DSI_VACT_NL);
+ if (dsi->driver_data->has_size_ctl)
+ writel(vm->vactive << 16 | vm->hactive,
+ dsi->regs + DSI_SIZE_CON);
+
horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
@@ -425,7 +481,34 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) *
dsi_tmp_buf_bpp - 10);
- horizontal_frontporch_byte = (vm->hfront_porch * dsi_tmp_buf_bpp - 12);
+ data_phy_cycles = timing->lpx + timing->da_hs_prepare +
+ timing->da_hs_zero + timing->da_hs_exit + 2;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ if (vm->hfront_porch * dsi_tmp_buf_bpp >
+ data_phy_cycles * dsi->lanes + 18) {
+ horizontal_frontporch_byte = vm->hfront_porch *
+ dsi_tmp_buf_bpp -
+ data_phy_cycles *
+ dsi->lanes - 18;
+ } else {
+ DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
+ horizontal_frontporch_byte = vm->hfront_porch *
+ dsi_tmp_buf_bpp;
+ }
+ } else {
+ if (vm->hfront_porch * dsi_tmp_buf_bpp >
+ data_phy_cycles * dsi->lanes + 12) {
+ horizontal_frontporch_byte = vm->hfront_porch *
+ dsi_tmp_buf_bpp -
+ data_phy_cycles *
+ dsi->lanes - 12;
+ } else {
+ DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
+ horizontal_frontporch_byte = vm->hfront_porch *
+ dsi_tmp_buf_bpp;
+ }
+ }
writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
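
The front-porch byte count now subtracts the D-PHY overhead (data_phy_cycles * lanes plus a mode-dependent constant) and, when the porch is too small, falls back to the raw value with a warning. A standalone example of the non-burst branch, reusing the data_phy_cycles figure from the timing sketch above (the other numbers are illustrative):

#include <stdio.h>

/* Illustrative: 24 bpp (3 bytes/px), 4 lanes, data_phy_cycles = 48. */
int main(void)
{
	unsigned int bpp = 3, lanes = 4, data_phy_cycles = 48;
	unsigned int hfp = 80;	/* pixels */
	unsigned int hfp_byte;

	if (hfp * bpp > data_phy_cycles * lanes + 12)	/* non-burst case */
		hfp_byte = hfp * bpp - data_phy_cycles * lanes - 12;
	else
		hfp_byte = hfp * bpp;	/* too small: keep raw HFP, warn */

	printf("horizontal_frontporch_byte = %u\n", hfp_byte);	/* 36 */
	return 0;
}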
@@ -523,10 +606,9 @@ static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t)
static int mtk_dsi_poweron(struct mtk_dsi *dsi)
{
- struct device *dev = dsi->dev;
+ struct device *dev = dsi->host.dev;
int ret;
- u64 pixel_clock, total_bits;
- u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;
+ u32 bit_per_pixel;
if (++dsi->refcount != 1)
return 0;
@@ -545,24 +627,8 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
break;
}
- /**
- * htotal_time = htotal * byte_per_pixel / num_lanes
- * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
- * mipi_ratio = (htotal_time + overhead_time) / htotal_time
- * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
- */
- pixel_clock = dsi->vm.pixelclock;
- htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
- dsi->vm.hsync_len;
- htotal_bits = htotal * bit_per_pixel;
-
- overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
- T_HS_EXIT;
- overhead_bits = overhead_cycles * dsi->lanes * 8;
- total_bits = htotal_bits + overhead_bits;
-
- dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
- htotal * dsi->lanes);
+ dsi->data_rate = DIV_ROUND_UP_ULL(dsi->vm.pixelclock * bit_per_pixel,
+ dsi->lanes);
ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
if (ret < 0) {
@@ -585,10 +651,17 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
}
mtk_dsi_enable(dsi);
+
+ if (dsi->driver_data->has_shadow_ctl)
+ writel(FORCE_COMMIT | BYPASS_SHADOW,
+ dsi->regs + DSI_SHADOW_DEBUG);
+
mtk_dsi_reset_engine(dsi);
mtk_dsi_phy_timconfig(dsi);
mtk_dsi_rxtx_control(dsi);
+ usleep_range(30, 100);
+ mtk_dsi_reset_dphy(dsi);
mtk_dsi_ps_control_vact(dsi);
mtk_dsi_set_vm_cmd(dsi);
mtk_dsi_config_vdo_timing(dsi);
@@ -939,6 +1012,7 @@ static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
const char *tx_buf = msg->tx_buf;
u8 config, cmdq_size, cmdq_off, type = msg->type;
u32 reg_val, cmdq_mask, i;
+ u32 reg_cmdq_off = dsi->driver_data->reg_cmdq_off;
if (MTK_DSI_HOST_IS_READ(type))
config = BTA;
@@ -958,9 +1032,11 @@ static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
}
for (i = 0; i < msg->tx_len; i++)
- writeb(tx_buf[i], dsi->regs + DSI_CMDQ0 + cmdq_off + i);
+ mtk_dsi_mask(dsi, (reg_cmdq_off + cmdq_off + i) & (~0x3U),
+ (0xffUL << (((i + cmdq_off) & 3U) * 8U)),
+ tx_buf[i] << (((i + cmdq_off) & 3U) * 8U));
- mtk_dsi_mask(dsi, DSI_CMDQ0, cmdq_mask, reg_val);
+ mtk_dsi_mask(dsi, reg_cmdq_off, cmdq_mask, reg_val);
mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size);
}
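
Because the CMDQ window is no longer byte-writable on all supported SoCs, the patch replaces writeb() with a masked 32-bit read-modify-write per payload byte. A standalone simulation of that packing against an array, using the same index and mask arithmetic as the loop above:

#include <stdio.h>
#include <stdint.h>

static uint32_t regs[4];	/* stand-in for the CMDQ register window */

/* Read-modify-write of one 32-bit word, like mtk_dsi_mask(). */
static void mask_write(unsigned int off, uint32_t mask, uint32_t val)
{
	uint32_t tmp = regs[off / 4];

	regs[off / 4] = (tmp & ~mask) | (val & mask);
}

int main(void)
{
	const uint8_t tx_buf[] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned int reg_cmdq_off = 0, cmdq_off = 2, i;

	for (i = 0; i < sizeof(tx_buf); i++)
		mask_write((reg_cmdq_off + cmdq_off + i) & ~0x3U,
			   0xffUL << (((i + cmdq_off) & 3U) * 8U),
			   (uint32_t)tx_buf[i] << (((i + cmdq_off) & 3U) * 8U));

	for (i = 0; i < 4; i++)
		printf("word %u: 0x%08x\n", i, regs[i]);
	return 0;
}

With cmdq_off = 2, bytes land in lanes 2 and 3 of word 0 and then fill word 1, which is exactly why the aligned address and the byte lane are derived separately.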
@@ -1050,12 +1126,6 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ret = mipi_dsi_host_register(&dsi->host);
- if (ret < 0) {
- dev_err(dev, "failed to register DSI host: %d\n", ret);
- goto err_ddp_comp_unregister;
- }
-
ret = mtk_dsi_create_conn_enc(drm, dsi);
if (ret) {
DRM_ERROR("Encoder create failed with %d\n", ret);
@@ -1065,8 +1135,6 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
return 0;
err_unregister:
- mipi_dsi_host_unregister(&dsi->host);
-err_ddp_comp_unregister:
mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
return ret;
}
@@ -1078,7 +1146,6 @@ static void mtk_dsi_unbind(struct device *dev, struct device *master,
struct mtk_dsi *dsi = dev_get_drvdata(dev);
mtk_dsi_destroy_conn_enc(dsi);
- mipi_dsi_host_unregister(&dsi->host);
mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
}
@@ -1102,31 +1169,38 @@ static int mtk_dsi_probe(struct platform_device *pdev)
dsi->host.ops = &mtk_dsi_ops;
dsi->host.dev = dev;
+ ret = mipi_dsi_host_register(&dsi->host);
+ if (ret < 0) {
+ dev_err(dev, "failed to register DSI host: %d\n", ret);
+ return ret;
+ }
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
&dsi->panel, &dsi->bridge);
if (ret)
- return ret;
+ goto err_unregister_host;
+
+ dsi->driver_data = of_device_get_match_data(dev);
dsi->engine_clk = devm_clk_get(dev, "engine");
if (IS_ERR(dsi->engine_clk)) {
ret = PTR_ERR(dsi->engine_clk);
dev_err(dev, "Failed to get engine clock: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
dsi->digital_clk = devm_clk_get(dev, "digital");
if (IS_ERR(dsi->digital_clk)) {
ret = PTR_ERR(dsi->digital_clk);
dev_err(dev, "Failed to get digital clock: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
dsi->hs_clk = devm_clk_get(dev, "hs");
if (IS_ERR(dsi->hs_clk)) {
ret = PTR_ERR(dsi->hs_clk);
dev_err(dev, "Failed to get hs clock: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1134,33 +1208,35 @@ static int mtk_dsi_probe(struct platform_device *pdev)
if (IS_ERR(dsi->regs)) {
ret = PTR_ERR(dsi->regs);
dev_err(dev, "Failed to ioremap memory: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
dsi->phy = devm_phy_get(dev, "dphy");
if (IS_ERR(dsi->phy)) {
ret = PTR_ERR(dsi->phy);
dev_err(dev, "Failed to get MIPI-DPHY: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DSI);
if (comp_id < 0) {
dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
- return comp_id;
+ ret = comp_id;
+ goto err_unregister_host;
}
ret = mtk_ddp_comp_init(dev, dev->of_node, &dsi->ddp_comp, comp_id,
&mtk_dsi_funcs);
if (ret) {
dev_err(dev, "Failed to initialize component: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
irq_num = platform_get_irq(pdev, 0);
if (irq_num < 0) {
- dev_err(&pdev->dev, "failed to request dsi irq resource\n");
- return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get dsi irq_num: %d\n", irq_num);
+ ret = irq_num;
+ goto err_unregister_host;
}
irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW);
@@ -1168,14 +1244,24 @@ static int mtk_dsi_probe(struct platform_device *pdev)
IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi);
if (ret) {
dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
- return -EPROBE_DEFER;
+ goto err_unregister_host;
}
init_waitqueue_head(&dsi->irq_wait_queue);
platform_set_drvdata(pdev, dsi);
- return component_add(&pdev->dev, &mtk_dsi_component_ops);
+ ret = component_add(&pdev->dev, &mtk_dsi_component_ops);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add component: %d\n", ret);
+ goto err_unregister_host;
+ }
+
+ return 0;
+
+err_unregister_host:
+ mipi_dsi_host_unregister(&dsi->host);
+ return ret;
}
static int mtk_dsi_remove(struct platform_device *pdev)
@@ -1184,13 +1270,32 @@ static int mtk_dsi_remove(struct platform_device *pdev)
mtk_output_dsi_disable(dsi);
component_del(&pdev->dev, &mtk_dsi_component_ops);
+ mipi_dsi_host_unregister(&dsi->host);
return 0;
}
+static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = {
+ .reg_cmdq_off = 0x200,
+};
+
+static const struct mtk_dsi_driver_data mt2701_dsi_driver_data = {
+ .reg_cmdq_off = 0x180,
+};
+
+static const struct mtk_dsi_driver_data mt8183_dsi_driver_data = {
+ .reg_cmdq_off = 0x200,
+ .has_shadow_ctl = true,
+ .has_size_ctl = true,
+};
+
static const struct of_device_id mtk_dsi_of_match[] = {
- { .compatible = "mediatek,mt2701-dsi" },
- { .compatible = "mediatek,mt8173-dsi" },
+ { .compatible = "mediatek,mt2701-dsi",
+ .data = &mt2701_dsi_driver_data },
+ { .compatible = "mediatek,mt8173-dsi",
+ .data = &mt8173_dsi_driver_data },
+ { .compatible = "mediatek,mt8183-dsi",
+ .data = &mt8183_dsi_driver_data },
{ },
};
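
Per-SoC DSI behavior (CMDQ base, shadow and size control) is now selected through the match table's .data pointer. A standalone analogue of that lookup; the table mirrors mtk_dsi_of_match, but the lookup code itself is illustrative rather than the OF API:

#include <stdio.h>
#include <string.h>

struct dsi_driver_data {
	unsigned int reg_cmdq_off;
	int has_shadow_ctl;
	int has_size_ctl;
};

static const struct dsi_driver_data mt2701 = { .reg_cmdq_off = 0x180 };
static const struct dsi_driver_data mt8173 = { .reg_cmdq_off = 0x200 };
static const struct dsi_driver_data mt8183 = {
	.reg_cmdq_off = 0x200, .has_shadow_ctl = 1, .has_size_ctl = 1,
};

static const struct {
	const char *compatible;
	const struct dsi_driver_data *data;
} match[] = {
	{ "mediatek,mt2701-dsi", &mt2701 },
	{ "mediatek,mt8173-dsi", &mt8173 },
	{ "mediatek,mt8183-dsi", &mt8183 },
};

int main(void)
{
	const char *compat = "mediatek,mt8183-dsi";
	unsigned int i;

	for (i = 0; i < sizeof(match) / sizeof(match[0]); i++)
		if (!strcmp(compat, match[i].compatible))
			printf("%s: cmdq base 0x%x shadow=%d size=%d\n",
			       compat, match[i].data->reg_cmdq_off,
			       match[i].data->has_shadow_ctl,
			       match[i].data->has_size_ctl);
	return 0;
}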
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
index 1842dc2caae9..e4d34484ecc8 100644
--- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
@@ -3,292 +3,39 @@
* Copyright (c) 2015 MediaTek Inc.
*/
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/phy/phy.h>
-
-#define MIPITX_DSI_CON 0x00
-#define RG_DSI_LDOCORE_EN BIT(0)
-#define RG_DSI_CKG_LDOOUT_EN BIT(1)
-#define RG_DSI_BCLK_SEL (3 << 2)
-#define RG_DSI_LD_IDX_SEL (7 << 4)
-#define RG_DSI_PHYCLK_SEL (2 << 8)
-#define RG_DSI_DSICLK_FREQ_SEL BIT(10)
-#define RG_DSI_LPTX_CLMP_EN BIT(11)
-
-#define MIPITX_DSI_CLOCK_LANE 0x04
-#define MIPITX_DSI_DATA_LANE0 0x08
-#define MIPITX_DSI_DATA_LANE1 0x0c
-#define MIPITX_DSI_DATA_LANE2 0x10
-#define MIPITX_DSI_DATA_LANE3 0x14
-#define RG_DSI_LNTx_LDOOUT_EN BIT(0)
-#define RG_DSI_LNTx_CKLANE_EN BIT(1)
-#define RG_DSI_LNTx_LPTX_IPLUS1 BIT(2)
-#define RG_DSI_LNTx_LPTX_IPLUS2 BIT(3)
-#define RG_DSI_LNTx_LPTX_IMINUS BIT(4)
-#define RG_DSI_LNTx_LPCD_IPLUS BIT(5)
-#define RG_DSI_LNTx_LPCD_IMINUS BIT(6)
-#define RG_DSI_LNTx_RT_CODE (0xf << 8)
-
-#define MIPITX_DSI_TOP_CON 0x40
-#define RG_DSI_LNT_INTR_EN BIT(0)
-#define RG_DSI_LNT_HS_BIAS_EN BIT(1)
-#define RG_DSI_LNT_IMP_CAL_EN BIT(2)
-#define RG_DSI_LNT_TESTMODE_EN BIT(3)
-#define RG_DSI_LNT_IMP_CAL_CODE (0xf << 4)
-#define RG_DSI_LNT_AIO_SEL (7 << 8)
-#define RG_DSI_PAD_TIE_LOW_EN BIT(11)
-#define RG_DSI_DEBUG_INPUT_EN BIT(12)
-#define RG_DSI_PRESERVE (7 << 13)
-
-#define MIPITX_DSI_BG_CON 0x44
-#define RG_DSI_BG_CORE_EN BIT(0)
-#define RG_DSI_BG_CKEN BIT(1)
-#define RG_DSI_BG_DIV (0x3 << 2)
-#define RG_DSI_BG_FAST_CHARGE BIT(4)
-#define RG_DSI_VOUT_MSK (0x3ffff << 5)
-#define RG_DSI_V12_SEL (7 << 5)
-#define RG_DSI_V10_SEL (7 << 8)
-#define RG_DSI_V072_SEL (7 << 11)
-#define RG_DSI_V04_SEL (7 << 14)
-#define RG_DSI_V032_SEL (7 << 17)
-#define RG_DSI_V02_SEL (7 << 20)
-#define RG_DSI_BG_R1_TRIM (0xf << 24)
-#define RG_DSI_BG_R2_TRIM (0xf << 28)
-
-#define MIPITX_DSI_PLL_CON0 0x50
-#define RG_DSI_MPPLL_PLL_EN BIT(0)
-#define RG_DSI_MPPLL_DIV_MSK (0x1ff << 1)
-#define RG_DSI_MPPLL_PREDIV (3 << 1)
-#define RG_DSI_MPPLL_TXDIV0 (3 << 3)
-#define RG_DSI_MPPLL_TXDIV1 (3 << 5)
-#define RG_DSI_MPPLL_POSDIV (7 << 7)
-#define RG_DSI_MPPLL_MONVC_EN BIT(10)
-#define RG_DSI_MPPLL_MONREF_EN BIT(11)
-#define RG_DSI_MPPLL_VOD_EN BIT(12)
-
-#define MIPITX_DSI_PLL_CON1 0x54
-#define RG_DSI_MPPLL_SDM_FRA_EN BIT(0)
-#define RG_DSI_MPPLL_SDM_SSC_PH_INIT BIT(1)
-#define RG_DSI_MPPLL_SDM_SSC_EN BIT(2)
-#define RG_DSI_MPPLL_SDM_SSC_PRD (0xffff << 16)
-
-#define MIPITX_DSI_PLL_CON2 0x58
-
-#define MIPITX_DSI_PLL_TOP 0x64
-#define RG_DSI_MPPLL_PRESERVE (0xff << 8)
-
-#define MIPITX_DSI_PLL_PWR 0x68
-#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0)
-#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1)
-#define RG_DSI_MPPLL_SDM_PWR_ACK BIT(8)
-
-#define MIPITX_DSI_SW_CTRL 0x80
-#define SW_CTRL_EN BIT(0)
-
-#define MIPITX_DSI_SW_CTRL_CON0 0x84
-#define SW_LNTC_LPTX_PRE_OE BIT(0)
-#define SW_LNTC_LPTX_OE BIT(1)
-#define SW_LNTC_LPTX_P BIT(2)
-#define SW_LNTC_LPTX_N BIT(3)
-#define SW_LNTC_HSTX_PRE_OE BIT(4)
-#define SW_LNTC_HSTX_OE BIT(5)
-#define SW_LNTC_HSTX_ZEROCLK BIT(6)
-#define SW_LNT0_LPTX_PRE_OE BIT(7)
-#define SW_LNT0_LPTX_OE BIT(8)
-#define SW_LNT0_LPTX_P BIT(9)
-#define SW_LNT0_LPTX_N BIT(10)
-#define SW_LNT0_HSTX_PRE_OE BIT(11)
-#define SW_LNT0_HSTX_OE BIT(12)
-#define SW_LNT0_LPRX_EN BIT(13)
-#define SW_LNT1_LPTX_PRE_OE BIT(14)
-#define SW_LNT1_LPTX_OE BIT(15)
-#define SW_LNT1_LPTX_P BIT(16)
-#define SW_LNT1_LPTX_N BIT(17)
-#define SW_LNT1_HSTX_PRE_OE BIT(18)
-#define SW_LNT1_HSTX_OE BIT(19)
-#define SW_LNT2_LPTX_PRE_OE BIT(20)
-#define SW_LNT2_LPTX_OE BIT(21)
-#define SW_LNT2_LPTX_P BIT(22)
-#define SW_LNT2_LPTX_N BIT(23)
-#define SW_LNT2_HSTX_PRE_OE BIT(24)
-#define SW_LNT2_HSTX_OE BIT(25)
-
-struct mtk_mipitx_data {
- const u32 mppll_preserve;
-};
-
-struct mtk_mipi_tx {
- struct device *dev;
- void __iomem *regs;
- u32 data_rate;
- const struct mtk_mipitx_data *driver_data;
- struct clk_hw pll_hw;
- struct clk *pll;
-};
+#include "mtk_mipi_tx.h"
-static inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw)
+inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw)
{
return container_of(hw, struct mtk_mipi_tx, pll_hw);
}
-static void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 bits)
+void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+ u32 bits)
{
u32 temp = readl(mipi_tx->regs + offset);
writel(temp & ~bits, mipi_tx->regs + offset);
}
-static void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 bits)
+void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+ u32 bits)
{
u32 temp = readl(mipi_tx->regs + offset);
writel(temp | bits, mipi_tx->regs + offset);
}
-static void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 mask, u32 data)
+void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+ u32 mask, u32 data)
{
u32 temp = readl(mipi_tx->regs + offset);
writel((temp & ~mask) | (data & mask), mipi_tx->regs + offset);
}
-static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
-{
- struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
- u8 txdiv, txdiv0, txdiv1;
- u64 pcw;
-
- dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate);
-
- if (mipi_tx->data_rate >= 500000000) {
- txdiv = 1;
- txdiv0 = 0;
- txdiv1 = 0;
- } else if (mipi_tx->data_rate >= 250000000) {
- txdiv = 2;
- txdiv0 = 1;
- txdiv1 = 0;
- } else if (mipi_tx->data_rate >= 125000000) {
- txdiv = 4;
- txdiv0 = 2;
- txdiv1 = 0;
- } else if (mipi_tx->data_rate > 62000000) {
- txdiv = 8;
- txdiv0 = 2;
- txdiv1 = 1;
- } else if (mipi_tx->data_rate >= 50000000) {
- txdiv = 16;
- txdiv0 = 2;
- txdiv1 = 2;
- } else {
- return -EINVAL;
- }
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON,
- RG_DSI_VOUT_MSK |
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN,
- (4 << 20) | (4 << 17) | (4 << 14) |
- (4 << 11) | (4 << 8) | (4 << 5) |
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
-
- usleep_range(30, 100);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN,
- (8 << 4) | RG_DSI_LNT_HS_BIAS_EN);
-
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON,
- RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
- RG_DSI_MPPLL_SDM_PWR_ON |
- RG_DSI_MPPLL_SDM_ISO_EN,
- RG_DSI_MPPLL_SDM_PWR_ON);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_PLL_EN);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 |
- RG_DSI_MPPLL_PREDIV,
- (txdiv0 << 3) | (txdiv1 << 5));
-
- /*
- * PLL PCW config
- * PCW bit 24~30 = integer part of pcw
- * PCW bit 0~23 = fractional part of pcw
- * pcw = data_Rate*4*txdiv/(Ref_clk*2);
- * Post DIV =4, so need data_Rate*4
- * Ref_clk is 26MHz
- */
- pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24,
- 26000000);
- writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2);
-
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
- RG_DSI_MPPLL_SDM_FRA_EN);
-
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
-
- usleep_range(20, 100);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
- RG_DSI_MPPLL_SDM_SSC_EN);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
- RG_DSI_MPPLL_PRESERVE,
- mipi_tx->driver_data->mppll_preserve);
-
- return 0;
-}
-
-static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
-{
- struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
-
- dev_dbg(mipi_tx->dev, "unprepare\n");
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_PLL_EN);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
- RG_DSI_MPPLL_PRESERVE, 0);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
- RG_DSI_MPPLL_SDM_ISO_EN |
- RG_DSI_MPPLL_SDM_PWR_ON,
- RG_DSI_MPPLL_SDM_ISO_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_LNT_HS_BIAS_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON,
- RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON,
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_DIV_MSK);
-}
-
-static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- return clamp_val(rate, 50000000, 1250000000);
-}
-
-static int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
@@ -299,37 +46,14 @@ static int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
return mipi_tx->data_rate;
}
-static const struct clk_ops mtk_mipi_tx_pll_ops = {
- .prepare = mtk_mipi_tx_pll_prepare,
- .unprepare = mtk_mipi_tx_pll_unprepare,
- .round_rate = mtk_mipi_tx_pll_round_rate,
- .set_rate = mtk_mipi_tx_pll_set_rate,
- .recalc_rate = mtk_mipi_tx_pll_recalc_rate,
-};
-
-static int mtk_mipi_tx_power_on_signal(struct phy *phy)
-{
- struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
- u32 reg;
-
- for (reg = MIPITX_DSI_CLOCK_LANE;
- reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
- mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_PAD_TIE_LOW_EN);
-
- return 0;
-}
-
static int mtk_mipi_tx_power_on(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
@@ -341,30 +65,16 @@ static int mtk_mipi_tx_power_on(struct phy *phy)
return ret;
/* Enable DSI Lane LDO outputs, disable pad tie low */
- mtk_mipi_tx_power_on_signal(phy);
-
+ mipi_tx->driver_data->mipi_tx_enable_signal(phy);
return 0;
}
-static void mtk_mipi_tx_power_off_signal(struct phy *phy)
-{
- struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
- u32 reg;
-
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_PAD_TIE_LOW_EN);
-
- for (reg = MIPITX_DSI_CLOCK_LANE;
- reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
- mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
-}
-
static int mtk_mipi_tx_power_off(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
/* Enable pad tie low, disable DSI Lane LDO outputs */
- mtk_mipi_tx_power_off_signal(phy);
+ mipi_tx->driver_data->mipi_tx_disable_signal(phy);
/* Disable PLL and power down core */
clk_disable_unprepare(mipi_tx->pll);
@@ -383,10 +93,9 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mtk_mipi_tx *mipi_tx;
struct resource *mem;
- struct clk *ref_clk;
const char *ref_clk_name;
+ struct clk *ref_clk;
struct clk_init_data clk_init = {
- .ops = &mtk_mipi_tx_pll_ops,
.num_parents = 1,
.parent_names = (const char * const *)&ref_clk_name,
.flags = CLK_SET_RATE_GATE,
@@ -400,6 +109,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
return -ENOMEM;
mipi_tx->driver_data = of_device_get_match_data(dev);
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mipi_tx->regs = devm_ioremap_resource(dev, mem);
if (IS_ERR(mipi_tx->regs)) {
@@ -414,6 +124,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
dev_err(dev, "Failed to get reference clock: %d\n", ret);
return ret;
}
+
ref_clk_name = __clk_get_name(ref_clk);
ret = of_property_read_string(dev->of_node, "clock-output-names",
@@ -423,6 +134,8 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
return ret;
}
+ clk_init.ops = mipi_tx->driver_data->mipi_tx_clk_ops;
+
mipi_tx->pll_hw.init = &clk_init;
mipi_tx->pll = devm_clk_register(dev, &mipi_tx->pll_hw);
if (IS_ERR(mipi_tx->pll)) {
@@ -457,20 +170,14 @@ static int mtk_mipi_tx_remove(struct platform_device *pdev)
return 0;
}
-static const struct mtk_mipitx_data mt2701_mipitx_data = {
- .mppll_preserve = (3 << 8)
-};
-
-static const struct mtk_mipitx_data mt8173_mipitx_data = {
- .mppll_preserve = (0 << 8)
-};
-
static const struct of_device_id mtk_mipi_tx_match[] = {
{ .compatible = "mediatek,mt2701-mipi-tx",
.data = &mt2701_mipitx_data },
{ .compatible = "mediatek,mt8173-mipi-tx",
.data = &mt8173_mipitx_data },
- {},
+ { .compatible = "mediatek,mt8183-mipi-tx",
+ .data = &mt8183_mipitx_data },
+ { },
};
struct platform_driver mtk_mipi_tx_driver = {
@@ -481,3 +188,4 @@ struct platform_driver mtk_mipi_tx_driver = {
.of_match_table = mtk_mipi_tx_match,
},
};
+
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.h b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
new file mode 100644
index 000000000000..413f35d86219
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Jitao Shi <jitao.shi@mediatek.com>
+ */
+
+#ifndef _MTK_MIPI_TX_H
+#define _MTK_MIPI_TX_H
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+
+struct mtk_mipitx_data {
+ const u32 mppll_preserve;
+ const struct clk_ops *mipi_tx_clk_ops;
+ void (*mipi_tx_enable_signal)(struct phy *phy);
+ void (*mipi_tx_disable_signal)(struct phy *phy);
+};
+
+struct mtk_mipi_tx {
+ struct device *dev;
+ void __iomem *regs;
+ u32 data_rate;
+ const struct mtk_mipitx_data *driver_data;
+ struct clk_hw pll_hw;
+ struct clk *pll;
+};
+
+struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw);
+void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits);
+void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits);
+void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 mask,
+ u32 data);
+int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate);
+
+extern const struct mtk_mipitx_data mt2701_mipitx_data;
+extern const struct mtk_mipitx_data mt8173_mipitx_data;
+extern const struct mtk_mipitx_data mt8183_mipitx_data;
+
+#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c
new file mode 100644
index 000000000000..f18db14d8b63
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: jitao.shi <jitao.shi@mediatek.com>
+ */
+
+#include "mtk_mipi_tx.h"
+
+#define MIPITX_DSI_CON 0x00
+#define RG_DSI_LDOCORE_EN BIT(0)
+#define RG_DSI_CKG_LDOOUT_EN BIT(1)
+#define RG_DSI_BCLK_SEL (3 << 2)
+#define RG_DSI_LD_IDX_SEL (7 << 4)
+#define RG_DSI_PHYCLK_SEL (2 << 8)
+#define RG_DSI_DSICLK_FREQ_SEL BIT(10)
+#define RG_DSI_LPTX_CLMP_EN BIT(11)
+
+#define MIPITX_DSI_CLOCK_LANE 0x04
+#define MIPITX_DSI_DATA_LANE0 0x08
+#define MIPITX_DSI_DATA_LANE1 0x0c
+#define MIPITX_DSI_DATA_LANE2 0x10
+#define MIPITX_DSI_DATA_LANE3 0x14
+#define RG_DSI_LNTx_LDOOUT_EN BIT(0)
+#define RG_DSI_LNTx_CKLANE_EN BIT(1)
+#define RG_DSI_LNTx_LPTX_IPLUS1 BIT(2)
+#define RG_DSI_LNTx_LPTX_IPLUS2 BIT(3)
+#define RG_DSI_LNTx_LPTX_IMINUS BIT(4)
+#define RG_DSI_LNTx_LPCD_IPLUS BIT(5)
+#define RG_DSI_LNTx_LPCD_IMINUS BIT(6)
+#define RG_DSI_LNTx_RT_CODE (0xf << 8)
+
+#define MIPITX_DSI_TOP_CON 0x40
+#define RG_DSI_LNT_INTR_EN BIT(0)
+#define RG_DSI_LNT_HS_BIAS_EN BIT(1)
+#define RG_DSI_LNT_IMP_CAL_EN BIT(2)
+#define RG_DSI_LNT_TESTMODE_EN BIT(3)
+#define RG_DSI_LNT_IMP_CAL_CODE (0xf << 4)
+#define RG_DSI_LNT_AIO_SEL (7 << 8)
+#define RG_DSI_PAD_TIE_LOW_EN BIT(11)
+#define RG_DSI_DEBUG_INPUT_EN BIT(12)
+#define RG_DSI_PRESERVE (7 << 13)
+
+#define MIPITX_DSI_BG_CON 0x44
+#define RG_DSI_BG_CORE_EN BIT(0)
+#define RG_DSI_BG_CKEN BIT(1)
+#define RG_DSI_BG_DIV (0x3 << 2)
+#define RG_DSI_BG_FAST_CHARGE BIT(4)
+#define RG_DSI_VOUT_MSK (0x3ffff << 5)
+#define RG_DSI_V12_SEL (7 << 5)
+#define RG_DSI_V10_SEL (7 << 8)
+#define RG_DSI_V072_SEL (7 << 11)
+#define RG_DSI_V04_SEL (7 << 14)
+#define RG_DSI_V032_SEL (7 << 17)
+#define RG_DSI_V02_SEL (7 << 20)
+#define RG_DSI_BG_R1_TRIM (0xf << 24)
+#define RG_DSI_BG_R2_TRIM (0xf << 28)
+
+#define MIPITX_DSI_PLL_CON0 0x50
+#define RG_DSI_MPPLL_PLL_EN BIT(0)
+#define RG_DSI_MPPLL_DIV_MSK (0x1ff << 1)
+#define RG_DSI_MPPLL_PREDIV (3 << 1)
+#define RG_DSI_MPPLL_TXDIV0 (3 << 3)
+#define RG_DSI_MPPLL_TXDIV1 (3 << 5)
+#define RG_DSI_MPPLL_POSDIV (7 << 7)
+#define RG_DSI_MPPLL_MONVC_EN BIT(10)
+#define RG_DSI_MPPLL_MONREF_EN BIT(11)
+#define RG_DSI_MPPLL_VOD_EN BIT(12)
+
+#define MIPITX_DSI_PLL_CON1 0x54
+#define RG_DSI_MPPLL_SDM_FRA_EN BIT(0)
+#define RG_DSI_MPPLL_SDM_SSC_PH_INIT BIT(1)
+#define RG_DSI_MPPLL_SDM_SSC_EN BIT(2)
+#define RG_DSI_MPPLL_SDM_SSC_PRD (0xffff << 16)
+
+#define MIPITX_DSI_PLL_CON2 0x58
+
+#define MIPITX_DSI_PLL_TOP 0x64
+#define RG_DSI_MPPLL_PRESERVE (0xff << 8)
+
+#define MIPITX_DSI_PLL_PWR 0x68
+#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0)
+#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1)
+#define RG_DSI_MPPLL_SDM_PWR_ACK BIT(8)
+
+#define MIPITX_DSI_SW_CTRL 0x80
+#define SW_CTRL_EN BIT(0)
+
+#define MIPITX_DSI_SW_CTRL_CON0 0x84
+#define SW_LNTC_LPTX_PRE_OE BIT(0)
+#define SW_LNTC_LPTX_OE BIT(1)
+#define SW_LNTC_LPTX_P BIT(2)
+#define SW_LNTC_LPTX_N BIT(3)
+#define SW_LNTC_HSTX_PRE_OE BIT(4)
+#define SW_LNTC_HSTX_OE BIT(5)
+#define SW_LNTC_HSTX_ZEROCLK BIT(6)
+#define SW_LNT0_LPTX_PRE_OE BIT(7)
+#define SW_LNT0_LPTX_OE BIT(8)
+#define SW_LNT0_LPTX_P BIT(9)
+#define SW_LNT0_LPTX_N BIT(10)
+#define SW_LNT0_HSTX_PRE_OE BIT(11)
+#define SW_LNT0_HSTX_OE BIT(12)
+#define SW_LNT0_LPRX_EN BIT(13)
+#define SW_LNT1_LPTX_PRE_OE BIT(14)
+#define SW_LNT1_LPTX_OE BIT(15)
+#define SW_LNT1_LPTX_P BIT(16)
+#define SW_LNT1_LPTX_N BIT(17)
+#define SW_LNT1_HSTX_PRE_OE BIT(18)
+#define SW_LNT1_HSTX_OE BIT(19)
+#define SW_LNT2_LPTX_PRE_OE BIT(20)
+#define SW_LNT2_LPTX_OE BIT(21)
+#define SW_LNT2_LPTX_P BIT(22)
+#define SW_LNT2_LPTX_N BIT(23)
+#define SW_LNT2_HSTX_PRE_OE BIT(24)
+#define SW_LNT2_HSTX_OE BIT(25)
+
+static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+ u8 txdiv, txdiv0, txdiv1;
+ u64 pcw;
+
+ dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate);
+
+ if (mipi_tx->data_rate >= 500000000) {
+ txdiv = 1;
+ txdiv0 = 0;
+ txdiv1 = 0;
+ } else if (mipi_tx->data_rate >= 250000000) {
+ txdiv = 2;
+ txdiv0 = 1;
+ txdiv1 = 0;
+ } else if (mipi_tx->data_rate >= 125000000) {
+ txdiv = 4;
+ txdiv0 = 2;
+ txdiv1 = 0;
+ } else if (mipi_tx->data_rate > 62000000) {
+ txdiv = 8;
+ txdiv0 = 2;
+ txdiv1 = 1;
+ } else if (mipi_tx->data_rate >= 50000000) {
+ txdiv = 16;
+ txdiv0 = 2;
+ txdiv1 = 2;
+ } else {
+ return -EINVAL;
+ }
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON,
+ RG_DSI_VOUT_MSK |
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN,
+ (4 << 20) | (4 << 17) | (4 << 14) |
+ (4 << 11) | (4 << 8) | (4 << 5) |
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
+
+ usleep_range(30, 100);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN,
+ (8 << 4) | RG_DSI_LNT_HS_BIAS_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON,
+ RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
+ RG_DSI_MPPLL_SDM_PWR_ON |
+ RG_DSI_MPPLL_SDM_ISO_EN,
+ RG_DSI_MPPLL_SDM_PWR_ON);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_PLL_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 |
+ RG_DSI_MPPLL_PREDIV,
+ (txdiv0 << 3) | (txdiv1 << 5));
+
+ /*
+ * PLL PCW config
+ * PCW bits 24~30 = integer part of pcw
+ * PCW bits 0~23 = fractional part of pcw
+ * pcw = data_rate * 4 * txdiv / (ref_clk * 2)
+ * The post divider is fixed at 4, hence the data_rate * 4 factor.
+ * ref_clk is 26 MHz.
+ */
+ pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24,
+ 26000000);
+ writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
+ RG_DSI_MPPLL_SDM_FRA_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
+
+ usleep_range(20, 100);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
+ RG_DSI_MPPLL_SDM_SSC_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
+ RG_DSI_MPPLL_PRESERVE,
+ mipi_tx->driver_data->mppll_preserve);
+
+ return 0;
+}
+
+static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+
+ dev_dbg(mipi_tx->dev, "unprepare\n");
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_PLL_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
+ RG_DSI_MPPLL_PRESERVE, 0);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
+ RG_DSI_MPPLL_SDM_ISO_EN |
+ RG_DSI_MPPLL_SDM_PWR_ON,
+ RG_DSI_MPPLL_SDM_ISO_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_LNT_HS_BIAS_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON,
+ RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON,
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_DIV_MSK);
+}
+
+static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return clamp_val(rate, 50000000, 1250000000);
+}
+
+static const struct clk_ops mtk_mipi_tx_pll_ops = {
+ .prepare = mtk_mipi_tx_pll_prepare,
+ .unprepare = mtk_mipi_tx_pll_unprepare,
+ .round_rate = mtk_mipi_tx_pll_round_rate,
+ .set_rate = mtk_mipi_tx_pll_set_rate,
+ .recalc_rate = mtk_mipi_tx_pll_recalc_rate,
+};
+
+static void mtk_mipi_tx_power_on_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+ u32 reg;
+
+ for (reg = MIPITX_DSI_CLOCK_LANE;
+ reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
+ mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_PAD_TIE_LOW_EN);
+}
+
+static void mtk_mipi_tx_power_off_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+ u32 reg;
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_PAD_TIE_LOW_EN);
+
+ for (reg = MIPITX_DSI_CLOCK_LANE;
+ reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
+ mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
+}
+
+const struct mtk_mipitx_data mt2701_mipitx_data = {
+ .mppll_preserve = (3 << 8),
+ .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
+ .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
+ .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
+};
+
+const struct mtk_mipitx_data mt8173_mipitx_data = {
+ .mppll_preserve = (0 << 8),
+ .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
+ .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
+ .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
+};
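The PCW written to MIPITX_DSI_PLL_CON2 above is a 7.24 fixed-point word. A minimal userspace sketch of the same arithmetic, assuming the 26 MHz reference clock from the comment; the sample inputs are illustrative, not taken from silicon documentation:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t mppll_pcw(uint64_t data_rate, unsigned int txdiv)
    {
    	/* shift into the 24-bit fractional field before dividing */
    	return (uint32_t)(((data_rate * 2 * txdiv) << 24) / 26000000);
    }

    int main(void)
    {
    	uint32_t pcw = mppll_pcw(1000000000, 1); /* 1 Gbps, txdiv = 1 */

    	printf("pcw = 0x%08x (int %u, frac 0x%06x)\n",
    	       pcw, pcw >> 24, pcw & 0xffffff);
    	return 0;
    }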
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
new file mode 100644
index 000000000000..91f08a351fd0
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: jitao.shi <jitao.shi@mediatek.com>
+ */
+
+#include "mtk_mipi_tx.h"
+
+#define MIPITX_LANE_CON 0x000c
+#define RG_DSI_CPHY_T1DRV_EN BIT(0)
+#define RG_DSI_ANA_CK_SEL BIT(1)
+#define RG_DSI_PHY_CK_SEL BIT(2)
+#define RG_DSI_CPHY_EN BIT(3)
+#define RG_DSI_PHYCK_INV_EN BIT(4)
+#define RG_DSI_PWR04_EN BIT(5)
+#define RG_DSI_BG_LPF_EN BIT(6)
+#define RG_DSI_BG_CORE_EN BIT(7)
+#define RG_DSI_PAD_TIEL_SEL BIT(8)
+
+#define MIPITX_PLL_PWR 0x0028
+#define MIPITX_PLL_CON0 0x002c
+#define MIPITX_PLL_CON1 0x0030
+#define MIPITX_PLL_CON2 0x0034
+#define MIPITX_PLL_CON3 0x0038
+#define MIPITX_PLL_CON4 0x003c
+#define RG_DSI_PLL_IBIAS (3 << 10)
+
+#define MIPITX_D2_SW_CTL_EN 0x0144
+#define MIPITX_D0_SW_CTL_EN 0x0244
+#define MIPITX_CK_CKMODE_EN 0x0328
+#define DSI_CK_CKMODE_EN BIT(0)
+#define MIPITX_CK_SW_CTL_EN 0x0344
+#define MIPITX_D1_SW_CTL_EN 0x0444
+#define MIPITX_D3_SW_CTL_EN 0x0544
+#define DSI_SW_CTL_EN BIT(0)
+#define AD_DSI_PLL_SDM_PWR_ON BIT(0)
+#define AD_DSI_PLL_SDM_ISO_EN BIT(1)
+
+#define RG_DSI_PLL_EN BIT(4)
+#define RG_DSI_PLL_POSDIV (0x7 << 8)
+
+static int mtk_mipi_tx_pll_enable(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+ unsigned int txdiv, txdiv0;
+ u64 pcw;
+
+ dev_dbg(mipi_tx->dev, "enable: %u bps\n", mipi_tx->data_rate);
+
+ if (mipi_tx->data_rate >= 2000000000) {
+ txdiv = 1;
+ txdiv0 = 0;
+ } else if (mipi_tx->data_rate >= 1000000000) {
+ txdiv = 2;
+ txdiv0 = 1;
+ } else if (mipi_tx->data_rate >= 500000000) {
+ txdiv = 4;
+ txdiv0 = 2;
+ } else if (mipi_tx->data_rate > 250000000) {
+ txdiv = 8;
+ txdiv0 = 3;
+ } else if (mipi_tx->data_rate >= 125000000) {
+ txdiv = 16;
+ txdiv0 = 4;
+ } else {
+ return -EINVAL;
+ }
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON4, RG_DSI_PLL_IBIAS);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+ udelay(1);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
+ pcw = div_u64(((u64)mipi_tx->data_rate * txdiv) << 24, 26000000);
+ writel(pcw, mipi_tx->regs + MIPITX_PLL_CON0);
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_POSDIV,
+ txdiv0 << 8);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+
+ return 0;
+}
+
+static void mtk_mipi_tx_pll_disable(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
+}
+
+static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return clamp_val(rate, 50000000, 1600000000);
+}
+
+static const struct clk_ops mtk_mipi_tx_pll_ops = {
+ .enable = mtk_mipi_tx_pll_enable,
+ .disable = mtk_mipi_tx_pll_disable,
+ .round_rate = mtk_mipi_tx_pll_round_rate,
+ .set_rate = mtk_mipi_tx_pll_set_rate,
+ .recalc_rate = mtk_mipi_tx_pll_recalc_rate,
+};
+
+static void mtk_mipi_tx_power_on_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+
+ /* BG_LPF_EN / BG_CORE_EN */
+ writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN,
+ mipi_tx->regs + MIPITX_LANE_CON);
+ usleep_range(30, 100);
+ writel(RG_DSI_BG_CORE_EN | RG_DSI_BG_LPF_EN,
+ mipi_tx->regs + MIPITX_LANE_CON);
+
+ /* Switch OFF each Lane */
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_CKMODE_EN, DSI_CK_CKMODE_EN);
+}
+
+static void mtk_mipi_tx_power_off_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+
+ /* Switch ON each Lane */
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
+
+ writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN,
+ mipi_tx->regs + MIPITX_LANE_CON);
+ writel(RG_DSI_PAD_TIEL_SEL, mipi_tx->regs + MIPITX_LANE_CON);
+}
+
+const struct mtk_mipitx_data mt8183_mipitx_data = {
+ .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
+ .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
+ .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
+};
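The divider ladder in mtk_mipi_tx_pll_enable() encodes txdiv = 2^posdiv, picked so that data_rate * txdiv lands at or above the roughly 2 GHz VCO band before the post divider brings it back down. A standalone sketch of that selection; the driver mixes >= and > at the band edges (see the 250 Mbps case above), which this loop only approximates:

    #include <stdint.h>
    #include <stdio.h>

    static int pick_posdiv(uint64_t data_rate, unsigned int *txdiv)
    {
    	unsigned int posdiv;

    	for (posdiv = 0; posdiv <= 4; posdiv++) {
    		*txdiv = 1u << posdiv;
    		if (data_rate * *txdiv >= 2000000000ULL)
    			return posdiv;
    	}
    	return -1; /* below ~125 Mbps: out of range */
    }

    int main(void)
    {
    	unsigned int txdiv;
    	int posdiv = pick_posdiv(600000000, &txdiv); /* 600 Mbps */

    	printf("posdiv = %d, txdiv = %u\n", posdiv, txdiv);
    	return 0;
    }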
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 022286dc6ab2..3bb7ffe5fc39 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -977,6 +977,11 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
dw_plat_data->input_bus_format = MEDIA_BUS_FMT_YUV8_1X24;
dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709;
+ if (dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
+ dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
+ dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-g12a-dw-hdmi"))
+ dw_plat_data->use_drm_infoframe = true;
+
platform_set_drvdata(pdev, meson_dw_hdmi);
meson_dw_hdmi->hdmi = dw_hdmi_bind(pdev, encoder,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 4f9df3b93598..397f8b0a9af8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -58,10 +58,7 @@ static void mga_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
-static const struct file_operations mgag200_driver_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(mgag200_driver_fops);
static struct drm_driver driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET,
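DEFINE_DRM_GEM_FOPS() supplies the stock GEM file operations that the removed open-coded table duplicated. Roughly what the macro expands to, going from memory of the v5.4-era <drm/drm_gem.h> (verify against the tree you build):

    static const struct file_operations mgag200_driver_fops = {
    	.owner		= THIS_MODULE,
    	.open		= drm_open,
    	.release	= drm_release,
    	.unlocked_ioctl	= drm_ioctl,
    	.compat_ioctl	= drm_compat_ioctl,
    	.poll		= drm_poll,
    	.read		= drm_read,
    	.llseek		= noop_llseek,
    	.mmap		= drm_gem_mmap,
    };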
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 663ff9f4fac9..1e7b1be25bb0 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -26,6 +26,8 @@
#include "dsi_cfg.h"
#include "msm_kms.h"
+#define DSI_RESET_TOGGLE_DELAY_MS 20
+
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
u32 ver;
@@ -986,7 +988,7 @@ static void dsi_sw_reset(struct msm_dsi_host *msm_host)
wmb(); /* clocks need to be enabled before reset */
dsi_write(msm_host, REG_DSI_RESET, 1);
- wmb(); /* make sure reset happen */
+	msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure the reset happens */
dsi_write(msm_host, REG_DSI_RESET, 0);
}
@@ -1396,7 +1398,7 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
/* dsi controller can only be reset while clocks are running */
dsi_write(msm_host, REG_DSI_RESET, 1);
- wmb(); /* make sure reset happen */
+	msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure the reset happens */
dsi_write(msm_host, REG_DSI_RESET, 0);
wmb(); /* controller out of reset */
dsi_write(msm_host, REG_DSI_CTRL, data0);
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 7f3dd3ffe2c9..0d9657cc70db 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -89,7 +89,6 @@ struct edp_ctrl {
/* edid raw data */
struct edid *edid;
- struct drm_dp_link dp_link;
struct drm_dp_aux *drm_aux;
/* dpcd raw data */
@@ -403,7 +402,7 @@ static void edp_fill_link_cfg(struct edp_ctrl *ctrl)
u32 prate;
u32 lrate;
u32 bpp;
- u8 max_lane = ctrl->dp_link.num_lanes;
+ u8 max_lane = drm_dp_max_lane_count(ctrl->dpcd);
u8 lane;
prate = ctrl->pixel_rate;
@@ -413,7 +412,7 @@ static void edp_fill_link_cfg(struct edp_ctrl *ctrl)
* By default, use the maximum link rate and minimum lane count,
* so that we can do rate down shift during link training.
*/
- ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate);
+ ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE];
prate *= bpp;
prate /= 8; /* in kByte */
@@ -439,7 +438,7 @@ static void edp_config_ctrl(struct edp_ctrl *ctrl)
data = EDP_CONFIGURATION_CTRL_LANES(ctrl->lane_cnt - 1);
- if (ctrl->dp_link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ if (drm_dp_enhanced_frame_cap(ctrl->dpcd))
data |= EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING;
depth = EDP_6BIT;
@@ -701,7 +700,7 @@ static int edp_link_rate_down_shift(struct edp_ctrl *ctrl)
rate = ctrl->link_rate;
lane = ctrl->lane_cnt;
- max_lane = ctrl->dp_link.num_lanes;
+ max_lane = drm_dp_max_lane_count(ctrl->dpcd);
bpp = ctrl->color_depth * 3;
prate = ctrl->pixel_rate;
@@ -751,18 +750,22 @@ static int edp_clear_training_pattern(struct edp_ctrl *ctrl)
static int edp_do_link_train(struct edp_ctrl *ctrl)
{
+ u8 values[2];
int ret;
- struct drm_dp_link dp_link;
DBG("");
/*
* Set the current link rate and lane cnt to panel. They may have been
* adjusted and the values are different from them in DPCD CAP
*/
- dp_link.num_lanes = ctrl->lane_cnt;
- dp_link.rate = drm_dp_bw_code_to_link_rate(ctrl->link_rate);
- dp_link.capabilities = ctrl->dp_link.capabilities;
- if (drm_dp_link_configure(ctrl->drm_aux, &dp_link) < 0)
+ values[0] = ctrl->lane_cnt;
+ values[1] = ctrl->link_rate;
+
+ if (drm_dp_enhanced_frame_cap(ctrl->dpcd))
+ values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ if (drm_dp_dpcd_write(ctrl->drm_aux, DP_LINK_BW_SET, values,
+ sizeof(values)) < 0)
return EDP_TRAIN_FAIL;
ctrl->v_level = 0; /* start from default level */
@@ -952,6 +955,7 @@ static void edp_ctrl_on_worker(struct work_struct *work)
{
struct edp_ctrl *ctrl = container_of(
work, struct edp_ctrl, on_work);
+ u8 value;
int ret;
mutex_lock(&ctrl->dev_mutex);
@@ -965,9 +969,27 @@ static void edp_ctrl_on_worker(struct work_struct *work)
edp_ctrl_link_enable(ctrl, 1);
edp_ctrl_irq_enable(ctrl, 1);
- ret = drm_dp_link_power_up(ctrl->drm_aux, &ctrl->dp_link);
- if (ret)
- goto fail;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) {
+ ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
+ if (ret < 0)
+ goto fail;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ ret = drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
+ if (ret < 0)
+ goto fail;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must
+ * exit the power saving state within 1 ms" (Section 2.5.3.1,
+		 * Table 5-52, "Sink Control Field" (register 0x600)).
+ */
+ usleep_range(1000, 2000);
+ }
ctrl->power_on = true;
@@ -1011,7 +1033,19 @@ static void edp_ctrl_off_worker(struct work_struct *work)
edp_state_ctrl(ctrl, 0);
- drm_dp_link_power_down(ctrl->drm_aux, &ctrl->dp_link);
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) {
+ u8 value;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
+ if (ret > 0) {
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
+ }
+ }
edp_ctrl_irq_enable(ctrl, 0);
@@ -1225,14 +1259,8 @@ int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl,
edp_ctrl_irq_enable(ctrl, 1);
}
- ret = drm_dp_link_probe(ctrl->drm_aux, &ctrl->dp_link);
- if (ret) {
- pr_err("%s: read dpcd cap failed, %d\n", __func__, ret);
- goto disable_ret;
- }
-
/* Initialize link rate as panel max link rate */
- ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate);
+ ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE];
ctrl->edid = drm_get_edid(connector, &ctrl->drm_aux->ddc);
if (!ctrl->edid) {
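Both DP_SET_POWER hunks above perform the same DPCD read-modify-write. A hypothetical helper condensing that sequence; the register names come from <drm/drm_dp_helper.h>, while the helper's name and shape are illustrative only:

    #include <drm/drm_dp_helper.h>
    #include <linux/delay.h>

    static int edp_sink_set_power(struct drm_dp_aux *aux, const u8 *dpcd, u8 state)
    {
    	u8 value;
    	int ret;

    	/* DP_SET_POWER only exists on DPCD 1.1 and later sinks */
    	if (dpcd[DP_DPCD_REV] < 0x11)
    		return 0;

    	ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
    	if (ret < 0)
    		return ret;

    	value &= ~DP_SET_POWER_MASK;
    	value |= state;

    	ret = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
    	if (ret < 0)
    		return ret;

    	/* DP 1.1: the sink must leave the power-save state within 1 ms */
    	if (state == DP_SET_POWER_D0)
    		usleep_range(1000, 2000);

    	return 0;
    }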
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index 12421567af89..b69ace8bf526 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -95,8 +95,11 @@ static void mxsfb_set_bus_fmt(struct mxsfb_drm_private *mxsfb)
reg = readl(mxsfb->base + LCDC_CTRL);
- if (mxsfb->connector.display_info.num_bus_formats)
- bus_format = mxsfb->connector.display_info.bus_formats[0];
+ if (mxsfb->connector->display_info.num_bus_formats)
+ bus_format = mxsfb->connector->display_info.bus_formats[0];
+
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Using bus_format: 0x%08X\n",
+ bus_format);
reg &= ~CTRL_BUS_WIDTH_MASK;
switch (bus_format) {
@@ -204,8 +207,9 @@ static dma_addr_t mxsfb_get_fb_paddr(struct mxsfb_drm_private *mxsfb)
static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
{
+ struct drm_device *drm = mxsfb->pipe.crtc.dev;
struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode;
- const u32 bus_flags = mxsfb->connector.display_info.bus_flags;
+ u32 bus_flags = mxsfb->connector->display_info.bus_flags;
u32 vdctrl0, vsync_pulse_len, hsync_pulse_len;
int err;
@@ -229,6 +233,16 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
clk_set_rate(mxsfb->clk, m->crtc_clock * 1000);
+ if (mxsfb->bridge && mxsfb->bridge->timings)
+ bus_flags = mxsfb->bridge->timings->input_bus_flags;
+
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Pixel clock: %dkHz (actual: %dkHz)\n",
+ m->crtc_clock,
+ (int)(clk_get_rate(mxsfb->clk) / 1000));
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Connector bus_flags: 0x%08X\n",
+ bus_flags);
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Mode flags: 0x%08X\n", m->flags);
+
writel(TRANSFER_COUNT_SET_VCOUNT(m->crtc_vdisplay) |
TRANSFER_COUNT_SET_HCOUNT(m->crtc_hdisplay),
mxsfb->base + mxsfb->devdata->transfer_count);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index e8506335cd15..497cf443a9af 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -101,9 +101,25 @@ static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
+ struct drm_connector *connector;
struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
struct drm_device *drm = pipe->plane.dev;
+ if (!mxsfb->connector) {
+ list_for_each_entry(connector,
+ &drm->mode_config.connector_list,
+ head)
+ if (connector->encoder == &mxsfb->pipe.encoder) {
+ mxsfb->connector = connector;
+ break;
+ }
+ }
+
+ if (!mxsfb->connector) {
+ dev_warn(drm->dev, "No connector attached, using default\n");
+ mxsfb->connector = &mxsfb->panel_connector;
+ }
+
pm_runtime_get_sync(drm->dev);
drm_panel_prepare(mxsfb->panel);
mxsfb_crtc_enable(mxsfb);
@@ -129,6 +145,9 @@ static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe)
drm_crtc_send_vblank_event(crtc, event);
}
spin_unlock_irq(&drm->event_lock);
+
+ if (mxsfb->connector != &mxsfb->panel_connector)
+ mxsfb->connector = NULL;
}
static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe,
@@ -226,16 +245,33 @@ static int mxsfb_load(struct drm_device *drm, unsigned long flags)
ret = drm_simple_display_pipe_init(drm, &mxsfb->pipe, &mxsfb_funcs,
mxsfb_formats, ARRAY_SIZE(mxsfb_formats), NULL,
- &mxsfb->connector);
+ mxsfb->connector);
if (ret < 0) {
dev_err(drm->dev, "Cannot setup simple display pipe\n");
goto err_vblank;
}
- ret = drm_panel_attach(mxsfb->panel, &mxsfb->connector);
- if (ret) {
- dev_err(drm->dev, "Cannot connect panel\n");
- goto err_vblank;
+ /*
+	 * Attach the panel only if there is one.
+	 * If no panel is attached, it must be a bridge. In that case we need
+	 * a reference to its connector for proper initialization, and we do
+	 * that lookup in pipe->enable(), since the connector won't be
+	 * attached to an encoder until then.
+ */
+
+ if (mxsfb->panel) {
+ ret = drm_panel_attach(mxsfb->panel, mxsfb->connector);
+ if (ret) {
+ dev_err(drm->dev, "Cannot connect panel: %d\n", ret);
+ goto err_vblank;
+ }
+ } else if (mxsfb->bridge) {
+ ret = drm_simple_display_pipe_attach_bridge(&mxsfb->pipe,
+ mxsfb->bridge);
+ if (ret) {
+ dev_err(drm->dev, "Cannot connect bridge: %d\n", ret);
+ goto err_vblank;
+ }
}
drm->mode_config.min_width = MXSFB_MIN_XRES;
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.h b/drivers/gpu/drm/mxsfb/mxsfb_drv.h
index d975300dca05..0b65b5194a9c 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.h
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.h
@@ -27,8 +27,10 @@ struct mxsfb_drm_private {
struct clk *clk_disp_axi;
struct drm_simple_display_pipe pipe;
- struct drm_connector connector;
+ struct drm_connector panel_connector;
+ struct drm_connector *connector;
struct drm_panel *panel;
+ struct drm_bridge *bridge;
};
int mxsfb_setup_crtc(struct drm_device *dev);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c
index be36f4d6cc96..4eb94744c526 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_out.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c
@@ -21,7 +21,8 @@
static struct mxsfb_drm_private *
drm_connector_to_mxsfb_drm_private(struct drm_connector *connector)
{
- return container_of(connector, struct mxsfb_drm_private, connector);
+ return container_of(connector, struct mxsfb_drm_private,
+ panel_connector);
}
static int mxsfb_panel_get_modes(struct drm_connector *connector)
@@ -76,22 +77,23 @@ static const struct drm_connector_funcs mxsfb_panel_connector_funcs = {
int mxsfb_create_output(struct drm_device *drm)
{
struct mxsfb_drm_private *mxsfb = drm->dev_private;
- struct drm_panel *panel;
int ret;
- ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0, &panel, NULL);
+ ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0,
+ &mxsfb->panel, &mxsfb->bridge);
if (ret)
return ret;
- mxsfb->connector.dpms = DRM_MODE_DPMS_OFF;
- mxsfb->connector.polled = 0;
- drm_connector_helper_add(&mxsfb->connector,
- &mxsfb_panel_connector_helper_funcs);
- ret = drm_connector_init(drm, &mxsfb->connector,
- &mxsfb_panel_connector_funcs,
- DRM_MODE_CONNECTOR_Unknown);
- if (!ret)
- mxsfb->panel = panel;
+ if (mxsfb->panel) {
+ mxsfb->connector = &mxsfb->panel_connector;
+ mxsfb->connector->dpms = DRM_MODE_DPMS_OFF;
+ mxsfb->connector->polled = 0;
+ drm_connector_helper_add(mxsfb->connector,
+ &mxsfb_panel_connector_helper_funcs);
+ ret = drm_connector_init(drm, mxsfb->connector,
+ &mxsfb_panel_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index a13924ae1992..549486f1d937 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -986,20 +986,11 @@ nv50_mstc_atomic_check(struct drm_connector *connector,
return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port);
}
-static const struct drm_connector_helper_funcs
-nv50_mstc_help = {
- .get_modes = nv50_mstc_get_modes,
- .mode_valid = nv50_mstc_mode_valid,
- .best_encoder = nv50_mstc_best_encoder,
- .atomic_best_encoder = nv50_mstc_atomic_best_encoder,
- .atomic_check = nv50_mstc_atomic_check,
-};
-
-static enum drm_connector_status
-nv50_mstc_detect(struct drm_connector *connector, bool force)
+static int
+nv50_mstc_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
- enum drm_connector_status conn_status;
int ret;
if (drm_connector_is_unregistered(connector))
@@ -1009,14 +1000,24 @@ nv50_mstc_detect(struct drm_connector *connector, bool force)
if (ret < 0 && ret != -EACCES)
return connector_status_disconnected;
- conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr,
- mstc->port);
+ ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr,
+ mstc->port);
pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
- return conn_status;
+ return ret;
}
+static const struct drm_connector_helper_funcs
+nv50_mstc_help = {
+ .get_modes = nv50_mstc_get_modes,
+ .mode_valid = nv50_mstc_mode_valid,
+ .best_encoder = nv50_mstc_best_encoder,
+ .atomic_best_encoder = nv50_mstc_atomic_best_encoder,
+ .atomic_check = nv50_mstc_atomic_check,
+ .detect_ctx = nv50_mstc_detect,
+};
+
static void
nv50_mstc_destroy(struct drm_connector *connector)
{
@@ -1031,7 +1032,6 @@ nv50_mstc_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs
nv50_mstc = {
.reset = nouveau_conn_reset,
- .detect = nv50_mstc_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = nv50_mstc_destroy,
.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
@@ -1309,14 +1309,14 @@ nv50_mstm_fini(struct nv50_mstm *mstm)
}
static void
-nv50_mstm_init(struct nv50_mstm *mstm)
+nv50_mstm_init(struct nv50_mstm *mstm, bool runtime)
{
int ret;
if (!mstm || !mstm->mgr.mst_state)
return;
- ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+ ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime);
if (ret == -1) {
drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
drm_kms_helper_hotplug_event(mstm->mgr.dev);
@@ -2263,7 +2263,7 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
struct nouveau_encoder *nv_encoder =
nouveau_encoder(encoder);
- nv50_mstm_init(nv_encoder->dp.mstm);
+ nv50_mstm_init(nv_encoder->dp.mstm, runtime);
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 3a5db17bc5c7..5b413588b823 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1130,6 +1130,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
const char *name = connector->name;
struct nouveau_encoder *nv_encoder;
int ret;
+ bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
+
+ if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
+ NV_DEBUG(drm, "service %s\n", name);
+ drm_dp_cec_irq(&nv_connector->aux);
+ if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
+ nv50_mstm_service(nv_encoder->dp.mstm);
+
+ return NVIF_NOTIFY_KEEP;
+ }
ret = pm_runtime_get(drm->dev->dev);
if (ret == 0) {
@@ -1150,25 +1160,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
return NVIF_NOTIFY_DROP;
}
- if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
- NV_DEBUG(drm, "service %s\n", name);
- drm_dp_cec_irq(&nv_connector->aux);
- if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
- nv50_mstm_service(nv_encoder->dp.mstm);
- } else {
- bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
-
+ if (!plugged)
+ drm_dp_cec_unset_edid(&nv_connector->aux);
+ NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
+ if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) {
if (!plugged)
- drm_dp_cec_unset_edid(&nv_connector->aux);
- NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
- if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) {
- if (!plugged)
- nv50_mstm_remove(nv_encoder->dp.mstm);
- }
-
- drm_helper_hpd_irq_event(connector->dev);
+ nv50_mstm_remove(nv_encoder->dp.mstm);
}
+ drm_helper_hpd_irq_event(connector->dev);
+
pm_runtime_mark_last_busy(drm->dev->dev);
pm_runtime_put_autosuspend(drm->dev->dev);
return NVIF_NOTIFY_KEEP;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 6f038511a03a..53f9bceaf17a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -407,6 +407,17 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime)
struct drm_connector_list_iter conn_iter;
int ret;
+ /*
+ * Enable hotplug interrupts (done as early as possible, since we need
+ * them for MST)
+ */
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
+ struct nouveau_connector *conn = nouveau_connector(connector);
+ nvif_notify_get(&conn->hpd);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
ret = disp->init(dev, resume, runtime);
if (ret)
return ret;
@@ -416,14 +427,6 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime)
*/
drm_kms_helper_poll_enable(dev);
- /* enable hotplug interrupts */
- drm_connector_list_iter_begin(dev, &conn_iter);
- nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
- struct nouveau_connector *conn = nouveau_connector(connector);
- nvif_notify_get(&conn->hpd);
- }
- drm_connector_list_iter_end(&conn_iter);
-
return ret;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index b30fcaa2d0f5..da16ea095f13 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -3548,7 +3548,7 @@ static int dsi_proto_config(struct dsi_data *dsi)
static void dsi_proto_timings(struct dsi_data *dsi)
{
- unsigned int tlpx, tclk_zero, tclk_prepare, tclk_trail;
+ unsigned int tlpx, tclk_zero, tclk_prepare;
unsigned int tclk_pre, tclk_post;
unsigned int ths_prepare, ths_prepare_ths_zero, ths_zero;
unsigned int ths_trail, ths_exit;
@@ -3567,7 +3567,6 @@ static void dsi_proto_timings(struct dsi_data *dsi)
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1);
tlpx = FLD_GET(r, 20, 16) * 2;
- tclk_trail = FLD_GET(r, 15, 8);
tclk_zero = FLD_GET(r, 7, 0);
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG2);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index c4ffe96e28bc..ea5d5c228534 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -676,7 +676,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_audio_format audio_format;
struct hdmi_audio_dma audio_dma;
struct hdmi_core_audio_config acore;
- int err, n, cts, channel_count;
+ int n, cts, channel_count;
unsigned int fs_nr;
bool word_length_16b = false;
@@ -738,7 +738,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
return -EINVAL;
}
- err = hdmi_compute_acr(pclk, fs_nr, &n, &cts);
+ hdmi_compute_acr(pclk, fs_nr, &n, &cts);
/* Audio clock regeneration settings */
acore.n = n;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 96f5cd17768c..ff4d35c8771f 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -807,7 +807,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_audio_format audio_format;
struct hdmi_audio_dma audio_dma;
struct hdmi_core_audio_config core_cfg;
- int err, n, cts, channel_count;
+ int n, cts, channel_count;
unsigned int fs_nr;
bool word_length_16b = false;
@@ -850,7 +850,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
return -EINVAL;
}
- err = hdmi_compute_acr(pclk, fs_nr, &n, &cts);
+ hdmi_compute_acr(pclk, fs_nr, &n, &cts);
core_cfg.n = n;
core_cfg.cts = cts;
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
index 835e6654fa82..43c1d096b021 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
@@ -113,7 +113,7 @@ extern struct platform_driver omap_dmm_driver;
/* GEM bo flags -> tiler fmt */
static inline enum tiler_fmt gem2fmt(u32 flags)
{
- switch (flags & OMAP_BO_TILED) {
+ switch (flags & OMAP_BO_TILED_MASK) {
case OMAP_BO_TILED_8:
return TILFMT_8BIT;
case OMAP_BO_TILED_16:
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 1b8b5108caf8..9aeab81dfb90 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -95,7 +95,7 @@ static u32 get_linear_addr(struct drm_framebuffer *fb,
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
{
- return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED;
+ return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED_MASK;
}
/* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */
@@ -135,7 +135,6 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
const struct drm_format_info *format = omap_fb->format;
- struct plane *plane = &omap_fb->planes[0];
u32 x, y, orient = 0;
info->fourcc = fb->format->format;
@@ -154,7 +153,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
x = state->src_x >> 16;
y = state->src_y >> 16;
- if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED) {
+ if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED_MASK) {
u32 w = state->src_w >> 16;
u32 h = state->src_h >> 16;
@@ -209,10 +208,8 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
info->screen_width /= format->cpp[0];
if (fb->format->format == DRM_FORMAT_NV12) {
- plane = &omap_fb->planes[1];
-
if (info->rotation_type == OMAP_DSS_ROT_TILER) {
- WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED));
+ WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED_MASK));
omap_gem_rotated_dma_addr(fb->obj[1], orient, x/2, y/2,
&info->p_uv_addr);
} else {
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 08f539efddfb..e518d93ca6df 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -67,7 +67,7 @@ struct omap_gem_object {
/**
* # of users of dma_addr
*/
- u32 dma_addr_cnt;
+ refcount_t dma_addr_cnt;
/**
* If the buffer has been imported from a dmabuf the OMAP_DB_DMABUF flag
@@ -196,7 +196,7 @@ static void omap_gem_evict(struct drm_gem_object *obj)
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct omap_drm_private *priv = obj->dev->dev_private;
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
int i;
@@ -324,7 +324,7 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj)
struct omap_gem_object *omap_obj = to_omap_bo(obj);
size_t size = obj->size;
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
/* for tiled buffers, the virtual size has stride rounded up
* to 4kb.. (to hide the fact that row n+1 might start 16kb or
* 32kb later!). But we don't back the entire buffer with
@@ -513,7 +513,7 @@ vm_fault_t omap_gem_fault(struct vm_fault *vmf)
* probably trigger put_pages()?
*/
- if (omap_obj->flags & OMAP_BO_TILED)
+ if (omap_obj->flags & OMAP_BO_TILED_MASK)
ret = omap_gem_fault_2d(obj, vma, vmf);
else
ret = omap_gem_fault_1d(obj, vma, vmf);
@@ -773,18 +773,20 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
mutex_lock(&omap_obj->lock);
if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
- if (omap_obj->dma_addr_cnt == 0) {
+ if (refcount_read(&omap_obj->dma_addr_cnt) == 0) {
u32 npages = obj->size >> PAGE_SHIFT;
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
struct tiler_block *block;
BUG_ON(omap_obj->block);
+ refcount_set(&omap_obj->dma_addr_cnt, 1);
+
ret = omap_gem_attach_pages(obj);
if (ret)
goto fail;
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
block = tiler_reserve_2d(fmt,
omap_obj->width,
omap_obj->height, 0);
@@ -813,13 +815,15 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
omap_obj->block = block;
DBG("got dma address: %pad", &omap_obj->dma_addr);
+ } else {
+ refcount_inc(&omap_obj->dma_addr_cnt);
}
- omap_obj->dma_addr_cnt++;
-
- *dma_addr = omap_obj->dma_addr;
+ if (dma_addr)
+ *dma_addr = omap_obj->dma_addr;
} else if (omap_gem_is_contiguous(omap_obj)) {
- *dma_addr = omap_obj->dma_addr;
+ if (dma_addr)
+ *dma_addr = omap_obj->dma_addr;
} else {
ret = -EINVAL;
goto fail;
@@ -832,38 +836,46 @@ fail:
}
/**
+ * omap_gem_unpin_locked() - Unpin a GEM object from memory
+ * @obj: the GEM object
+ *
+ * omap_gem_unpin() without locking.
+ */
+static void omap_gem_unpin_locked(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret;
+
+ if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) {
+ ret = tiler_unpin(omap_obj->block);
+ if (ret) {
+ dev_err(obj->dev->dev,
+ "could not unpin pages: %d\n", ret);
+ }
+ ret = tiler_release(omap_obj->block);
+ if (ret) {
+ dev_err(obj->dev->dev,
+ "could not release unmap: %d\n", ret);
+ }
+ omap_obj->dma_addr = 0;
+ omap_obj->block = NULL;
+ }
+}
+
+/**
* omap_gem_unpin() - Unpin a GEM object from memory
* @obj: the GEM object
*
* Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
- * reference-counted, the actualy unpin will only be performed when the number
+ * reference-counted, the actual unpin will only be performed when the number
* of calls to this function matches the number of calls to omap_gem_pin().
*/
void omap_gem_unpin(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- int ret;
mutex_lock(&omap_obj->lock);
-
- if (omap_obj->dma_addr_cnt > 0) {
- omap_obj->dma_addr_cnt--;
- if (omap_obj->dma_addr_cnt == 0) {
- ret = tiler_unpin(omap_obj->block);
- if (ret) {
- dev_err(obj->dev->dev,
- "could not unpin pages: %d\n", ret);
- }
- ret = tiler_release(omap_obj->block);
- if (ret) {
- dev_err(obj->dev->dev,
- "could not release unmap: %d\n", ret);
- }
- omap_obj->dma_addr = 0;
- omap_obj->block = NULL;
- }
- }
-
+ omap_gem_unpin_locked(obj);
mutex_unlock(&omap_obj->lock);
}
@@ -879,8 +891,8 @@ int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
mutex_lock(&omap_obj->lock);
- if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
- (omap_obj->flags & OMAP_BO_TILED)) {
+ if ((refcount_read(&omap_obj->dma_addr_cnt) > 0) && omap_obj->block &&
+ (omap_obj->flags & OMAP_BO_TILED_MASK)) {
*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
ret = 0;
}
@@ -895,7 +907,7 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = -EINVAL;
- if (omap_obj->flags & OMAP_BO_TILED)
+ if (omap_obj->flags & OMAP_BO_TILED_MASK)
ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
return ret;
}
@@ -1030,10 +1042,11 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
omap_obj->flags, obj->name, kref_read(&obj->refcount),
- off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
+ off, &omap_obj->dma_addr,
+ refcount_read(&omap_obj->dma_addr_cnt),
omap_obj->vaddr, omap_obj->roll);
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
if (omap_obj->block) {
struct tcm_area *area = &omap_obj->block->area;
@@ -1093,7 +1106,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
mutex_lock(&omap_obj->lock);
/* The object should not be pinned. */
- WARN_ON(omap_obj->dma_addr_cnt > 0);
+ WARN_ON(refcount_read(&omap_obj->dma_addr_cnt) > 0);
if (omap_obj->pages) {
if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
@@ -1120,6 +1133,38 @@ void omap_gem_free_object(struct drm_gem_object *obj)
kfree(omap_obj);
}
+static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+
+ switch (flags & OMAP_BO_CACHE_MASK) {
+ case OMAP_BO_CACHED:
+ case OMAP_BO_WC:
+ case OMAP_BO_CACHE_MASK:
+ break;
+
+ default:
+ return false;
+ }
+
+ if (flags & OMAP_BO_TILED_MASK) {
+ if (!priv->usergart)
+ return false;
+
+ switch (flags & OMAP_BO_TILED_MASK) {
+ case OMAP_BO_TILED_8:
+ case OMAP_BO_TILED_16:
+ case OMAP_BO_TILED_32:
+ break;
+
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
union omap_gem_size gsize, u32 flags)
@@ -1131,18 +1176,15 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
size_t size;
int ret;
- /* Validate the flags and compute the memory and cache flags. */
- if (flags & OMAP_BO_TILED) {
- if (!priv->usergart) {
- dev_err(dev->dev, "Tiled buffers require DMM\n");
- return NULL;
- }
+ if (!omap_gem_validate_flags(dev, flags))
+ return NULL;
+ /* Validate the flags and compute the memory and cache flags. */
+ if (flags & OMAP_BO_TILED_MASK) {
/*
* Tiled buffers are always shmem paged backed. When they are
* scanned out, they are remapped into DMM/TILER.
*/
- flags &= ~OMAP_BO_SCANOUT;
flags |= OMAP_BO_MEM_SHMEM;
/*
@@ -1153,9 +1195,8 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
flags |= tiler_get_cpu_cache_flags();
} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
/*
- * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
- * tiled. However, to lower the pressure on memory allocation,
- * use contiguous memory only if no TILER is available.
+ * If we don't have DMM, we must allocate scanout buffers
+ * from contiguous DMA memory.
*/
flags |= OMAP_BO_MEM_DMA_API;
} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
@@ -1174,7 +1215,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
omap_obj->flags = flags;
mutex_init(&omap_obj->lock);
- if (flags & OMAP_BO_TILED) {
+ if (flags & OMAP_BO_TILED_MASK) {
/*
* For tiled buffers align dimensions to slot boundaries and
* calculate size based on aligned dimensions.
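The dma_addr_cnt conversion in this file follows the usual refcount_t pinning pattern: the first pin creates the mapping and publishes a count of 1, later pins only increment, and the last unpin tears the mapping down when the count drops to zero. A skeleton of that pattern with illustrative names, not the driver's:

    #include <linux/mutex.h>
    #include <linux/refcount.h>

    struct demo_bo {
    	struct mutex lock;	/* serializes first pin vs. last unpin */
    	refcount_t pin_cnt;
    };

    static int demo_pin(struct demo_bo *bo)
    {
    	int ret = 0;

    	mutex_lock(&bo->lock);
    	if (refcount_read(&bo->pin_cnt) == 0) {
    		/* create the mapping here; only then publish the count */
    		refcount_set(&bo->pin_cnt, 1);
    	} else {
    		refcount_inc(&bo->pin_cnt);
    	}
    	mutex_unlock(&bo->lock);
    	return ret;
    }

    static void demo_unpin(struct demo_bo *bo)
    {
    	mutex_lock(&bo->lock);
    	if (refcount_dec_and_test(&bo->pin_cnt)) {
    		/* tear the mapping down here */
    	}
    	mutex_unlock(&bo->lock);
    }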
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index e8c3ae7ac77e..7344bb61936c 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -67,7 +67,7 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
{
struct drm_gem_object *obj = buffer->priv;
struct page **pages;
- if (omap_gem_flags(obj) & OMAP_BO_TILED) {
+ if (omap_gem_flags(obj) & OMAP_BO_TILED_MASK) {
/* TODO we would need to pin at least part of the buffer to
* get de-tiled view. For now just reject it.
*/
diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
index f984a5189fbf..7a1385e834f0 100644
--- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c
+++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
@@ -219,9 +219,17 @@ static const struct of_device_id lb035q02_of_match[] = {
MODULE_DEVICE_TABLE(of, lb035q02_of_match);
+static const struct spi_device_id lb035q02_ids[] = {
+ { "lb035q02", 0 },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, lb035q02_ids);
+
static struct spi_driver lb035q02_driver = {
.probe = lb035q02_probe,
.remove = lb035q02_remove,
+ .id_table = lb035q02_ids,
.driver = {
.name = "panel-lg-lb035q02",
.of_match_table = lb035q02_of_match,
@@ -230,7 +238,6 @@ static struct spi_driver lb035q02_driver = {
module_spi_driver(lb035q02_driver);
-MODULE_ALIAS("spi:lgphilips,lb035q02");
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
MODULE_DESCRIPTION("LG.Philips LB035Q02 LCD Panel driver");
MODULE_LICENSE("GPL");
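The MODULE_ALIAS lines dropped in this and the following panel patches never matched anything: the SPI core derives the uevent MODALIAS from the DT compatible with the vendor prefix stripped, so only the bare device names carried by the new spi_device_id tables can autoload these modules. A userspace illustration of that derivation (a sketch, not the kernel's actual helper):

    #include <stdio.h>
    #include <string.h>

    /* mimic dropping the "vendor," prefix from a DT compatible */
    static const char *spi_modalias(const char *compatible)
    {
    	const char *comma = strchr(compatible, ',');

    	return comma ? comma + 1 : compatible;
    }

    int main(void)
    {
    	printf("MODALIAS=spi:%s\n", spi_modalias("lgphilips,lb035q02"));
    	return 0;
    }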
diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
index c4bc7fa85fad..fd593532ab23 100644
--- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
@@ -229,9 +229,17 @@ static const struct of_device_id nl8048_of_match[] = {
MODULE_DEVICE_TABLE(of, nl8048_of_match);
+static const struct spi_device_id nl8048_ids[] = {
+ { "nl8048hl11", 0 },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, nl8048_ids);
+
static struct spi_driver nl8048_driver = {
.probe = nl8048_probe,
.remove = nl8048_remove,
+ .id_table = nl8048_ids,
.driver = {
.name = "panel-nec-nl8048hl11",
.pm = &nl8048_pm_ops,
@@ -241,7 +249,6 @@ static struct spi_driver nl8048_driver = {
module_spi_driver(nl8048_driver);
-MODULE_ALIAS("spi:nec,nl8048hl11");
MODULE_AUTHOR("Erik Gilling <konkers@android.com>");
MODULE_DESCRIPTION("NEC-NL8048HL11 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index 6f5ce5867ff9..d6387d8f88a3 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -683,9 +683,17 @@ static const struct of_device_id acx565akm_of_match[] = {
MODULE_DEVICE_TABLE(of, acx565akm_of_match);
+static const struct spi_device_id acx565akm_ids[] = {
+ { "acx565akm", 0 },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, acx565akm_ids);
+
static struct spi_driver acx565akm_driver = {
.probe = acx565akm_probe,
.remove = acx565akm_remove,
+ .id_table = acx565akm_ids,
.driver = {
.name = "panel-sony-acx565akm",
.of_match_table = acx565akm_of_match,
@@ -694,7 +702,6 @@ static struct spi_driver acx565akm_driver = {
module_spi_driver(acx565akm_driver);
-MODULE_ALIAS("spi:sony,acx565akm");
MODULE_AUTHOR("Nokia Corporation");
MODULE_DESCRIPTION("Sony ACX565AKM LCD Panel Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index 1d08d03f509d..c44d6a65c0aa 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -374,8 +374,7 @@ static const struct of_device_id td028ttec1_of_match[] = {
MODULE_DEVICE_TABLE(of, td028ttec1_of_match);
static const struct spi_device_id td028ttec1_ids[] = {
- { "tpo,td028ttec1", 0},
- { "toppoly,td028ttec1", 0 },
+ { "td028ttec1", 0 },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
index 416aebd23b6a..621b65feec07 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -490,9 +490,17 @@ static const struct of_device_id td043mtea1_of_match[] = {
MODULE_DEVICE_TABLE(of, td043mtea1_of_match);
+static const struct spi_device_id td043mtea1_ids[] = {
+ { "td043mtea1", 0 },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, td043mtea1_ids);
+
static struct spi_driver td043mtea1_driver = {
.probe = td043mtea1_probe,
.remove = td043mtea1_remove,
+ .id_table = td043mtea1_ids,
.driver = {
.name = "panel-tpo-td043mtea1",
.pm = &td043mtea1_pm_ops,
@@ -502,7 +510,6 @@ static struct spi_driver td043mtea1_driver = {
module_spi_driver(td043mtea1_driver);
-MODULE_ALIAS("spi:tpo,td043mtea1");
MODULE_AUTHOR("Gražvydas Ignotas <notasas@gmail.com>");
MODULE_DESCRIPTION("TPO TD043MTEA1 Panel Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panfrost/TODO b/drivers/gpu/drm/panfrost/TODO
index 536a0d4f8d29..8c811a9e683b 100644
--- a/drivers/gpu/drm/panfrost/TODO
+++ b/drivers/gpu/drm/panfrost/TODO
@@ -10,3 +10,5 @@
- Compute job support. So called 'compute only' jobs need to be plumbed up to
userspace.
+
+- Support core dump on job failure
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index c1eb8cfe6aeb..4c4e8a30a1ac 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -13,99 +13,42 @@
#include "panfrost_gpu.h"
#include "panfrost_regs.h"
-static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot);
+static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev);
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
- struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
- struct dev_pm_opp *opp;
- unsigned long old_clk_rate = pfdev->devfreq.cur_freq;
- unsigned long target_volt, target_rate;
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
int err;
- opp = devfreq_recommended_opp(dev, freq, flags);
- if (IS_ERR(opp))
- return PTR_ERR(opp);
-
- target_rate = dev_pm_opp_get_freq(opp);
- target_volt = dev_pm_opp_get_voltage(opp);
- dev_pm_opp_put(opp);
-
- if (old_clk_rate == target_rate)
- return 0;
-
- /*
- * If frequency scaling from low to high, adjust voltage first.
- * If frequency scaling from high to low, adjust frequency first.
- */
- if (old_clk_rate < target_rate) {
- err = regulator_set_voltage(pfdev->regulator, target_volt,
- target_volt);
- if (err) {
- dev_err(dev, "Cannot set voltage %lu uV\n",
- target_volt);
- return err;
- }
- }
-
- err = clk_set_rate(pfdev->clock, target_rate);
- if (err) {
- dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
- err);
- if (pfdev->regulator)
- regulator_set_voltage(pfdev->regulator,
- pfdev->devfreq.cur_volt,
- pfdev->devfreq.cur_volt);
+ err = dev_pm_opp_set_rate(dev, *freq);
+ if (err)
return err;
- }
- if (old_clk_rate > target_rate) {
- err = regulator_set_voltage(pfdev->regulator, target_volt,
- target_volt);
- if (err)
- dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);
- }
-
- pfdev->devfreq.cur_freq = target_rate;
- pfdev->devfreq.cur_volt = target_volt;
+ *freq = clk_get_rate(pfdev->clock);
return 0;
}
static void panfrost_devfreq_reset(struct panfrost_device *pfdev)
{
- ktime_t now = ktime_get();
- int i;
-
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- pfdev->devfreq.slot[i].busy_time = 0;
- pfdev->devfreq.slot[i].idle_time = 0;
- pfdev->devfreq.slot[i].time_last_update = now;
- }
+ pfdev->devfreq.busy_time = 0;
+ pfdev->devfreq.idle_time = 0;
+ pfdev->devfreq.time_last_update = ktime_get();
}
static int panfrost_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *status)
{
- struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
- int i;
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- panfrost_devfreq_update_utilization(pfdev, i);
- }
+ panfrost_devfreq_update_utilization(pfdev);
status->current_frequency = clk_get_rate(pfdev->clock);
- status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.slot[0].busy_time,
- pfdev->devfreq.slot[0].idle_time));
+ status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.busy_time,
+ pfdev->devfreq.idle_time));
- status->busy_time = 0;
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- status->busy_time += ktime_to_ns(pfdev->devfreq.slot[i].busy_time);
- }
-
- /* We're scheduling only to one core atm, so don't divide for now */
- /* status->busy_time /= NUM_JOB_SLOTS; */
+ status->busy_time = ktime_to_ns(pfdev->devfreq.busy_time);
panfrost_devfreq_reset(pfdev);
@@ -121,7 +64,7 @@ static int panfrost_devfreq_get_cur_freq(struct device *dev, unsigned long *freq
{
struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
- *freq = pfdev->devfreq.cur_freq;
+ *freq = clk_get_rate(pfdev->clock);
return 0;
}
@@ -137,6 +80,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
{
int ret;
struct dev_pm_opp *opp;
+ unsigned long cur_freq;
ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
if (ret == -ENODEV) /* Optional, continue without devfreq */
@@ -146,13 +90,13 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
panfrost_devfreq_reset(pfdev);
- pfdev->devfreq.cur_freq = clk_get_rate(pfdev->clock);
+ cur_freq = clk_get_rate(pfdev->clock);
- opp = devfreq_recommended_opp(&pfdev->pdev->dev, &pfdev->devfreq.cur_freq, 0);
+ opp = devfreq_recommended_opp(&pfdev->pdev->dev, &cur_freq, 0);
if (IS_ERR(opp))
return PTR_ERR(opp);
- panfrost_devfreq_profile.initial_freq = pfdev->devfreq.cur_freq;
+ panfrost_devfreq_profile.initial_freq = cur_freq;
dev_pm_opp_put(opp);
pfdev->devfreq.devfreq = devm_devfreq_add_device(&pfdev->pdev->dev,
@@ -176,14 +120,10 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev)
void panfrost_devfreq_resume(struct panfrost_device *pfdev)
{
- int i;
-
if (!pfdev->devfreq.devfreq)
return;
panfrost_devfreq_reset(pfdev);
- for (i = 0; i < NUM_JOB_SLOTS; i++)
- pfdev->devfreq.slot[i].busy = false;
devfreq_resume_device(pfdev->devfreq.devfreq);
}
@@ -196,9 +136,8 @@ void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
devfreq_suspend_device(pfdev->devfreq.devfreq);
}
-static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot)
+static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev)
{
- struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
ktime_t now;
ktime_t last;
@@ -206,22 +145,27 @@ static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, i
return;
now = ktime_get();
- last = pfdev->devfreq.slot[slot].time_last_update;
+ last = pfdev->devfreq.time_last_update;
- /* If we last recorded a transition to busy, we have been idle since */
- if (devfreq_slot->busy)
- pfdev->devfreq.slot[slot].busy_time += ktime_sub(now, last);
+ if (atomic_read(&pfdev->devfreq.busy_count) > 0)
+ pfdev->devfreq.busy_time += ktime_sub(now, last);
else
- pfdev->devfreq.slot[slot].idle_time += ktime_sub(now, last);
+ pfdev->devfreq.idle_time += ktime_sub(now, last);
- pfdev->devfreq.slot[slot].time_last_update = now;
+ pfdev->devfreq.time_last_update = now;
+}
+
+void panfrost_devfreq_record_busy(struct panfrost_device *pfdev)
+{
+ panfrost_devfreq_update_utilization(pfdev);
+ atomic_inc(&pfdev->devfreq.busy_count);
}
-/* The job scheduler is expected to call this at every transition busy <-> idle */
-void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot)
+void panfrost_devfreq_record_idle(struct panfrost_device *pfdev)
{
- struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
+ int count;
- panfrost_devfreq_update_utilization(pfdev, slot);
- devfreq_slot->busy = !devfreq_slot->busy;
+ panfrost_devfreq_update_utilization(pfdev);
+ count = atomic_dec_if_positive(&pfdev->devfreq.busy_count);
+ WARN_ON(count < 0);
}
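The devfreq rework above replaces per-slot busy flags with one atomic busy counter plus busy/idle time accumulators that are flushed before every counter transition. A self-contained userspace sketch of the same accounting, with illustrative names:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <time.h>

    static atomic_int busy_count;
    static int64_t busy_ns, idle_ns, last_ns;

    static int64_t now_ns(void)
    {
    	struct timespec ts;

    	clock_gettime(CLOCK_MONOTONIC, &ts);
    	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
    }

    static void reset_counters(void)
    {
    	busy_ns = idle_ns = 0;
    	last_ns = now_ns();
    }

    static void update_utilization(void)
    {
    	int64_t now = now_ns();

    	/* charge the elapsed time to busy or idle, then restart the clock */
    	if (atomic_load(&busy_count) > 0)
    		busy_ns += now - last_ns;
    	else
    		idle_ns += now - last_ns;
    	last_ns = now;
    }

    static void record_busy(void)
    {
    	update_utilization();
    	atomic_fetch_add(&busy_count, 1);
    }

    static void record_idle(void)
    {
    	update_utilization();
    	/* the driver uses atomic_dec_if_positive() and WARNs on underflow */
    	atomic_fetch_sub(&busy_count, 1);
    }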
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
index e3bc63e82843..0611beffc8d0 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.h
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
@@ -10,6 +10,7 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev);
void panfrost_devfreq_resume(struct panfrost_device *pfdev);
void panfrost_devfreq_suspend(struct panfrost_device *pfdev);
-void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot);
+void panfrost_devfreq_record_busy(struct panfrost_device *pfdev);
+void panfrost_devfreq_record_idle(struct panfrost_device *pfdev);
#endif /* __PANFROST_DEVFREQ_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 9c39b9794811..06713811b92c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -51,13 +51,6 @@ struct panfrost_features {
unsigned long hw_issues[64 / BITS_PER_LONG];
};
-struct panfrost_devfreq_slot {
- ktime_t busy_time;
- ktime_t idle_time;
- ktime_t time_last_update;
- bool busy;
-};
-
struct panfrost_device {
struct device *dev;
struct drm_device *ddev;
@@ -93,9 +86,10 @@ struct panfrost_device {
struct {
struct devfreq *devfreq;
struct thermal_cooling_device *cooling;
- unsigned long cur_freq;
- unsigned long cur_volt;
- struct panfrost_devfreq_slot slot[NUM_JOB_SLOTS];
+ ktime_t busy_time;
+ ktime_t idle_time;
+ ktime_t time_last_update;
+ atomic_t busy_count;
} devfreq;
};
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index bc2ddeb55f5d..9d086133a84e 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -470,7 +470,7 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
};
-DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops);
+DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);
/*
* Panfrost driver version:
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index acb07fe06580..deca0c30bbd4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -112,7 +112,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .vm_ops = &drm_gem_shmem_vm_ops,
+ .mmap = drm_gem_shmem_mmap,
};
/**
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index f67ed925c0ef..8822ec13a0d6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -208,6 +208,9 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES);
pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES);
pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES);
+ pfdev->features.max_threads = gpu_read(pfdev, GPU_THREAD_MAX_THREADS);
+ pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
+ pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE);
pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
for (i = 0; i < 4; i++)
pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index a58551668d9a..d411eb6c8eb9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -155,8 +155,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
}
cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
-
- panfrost_devfreq_record_transition(pfdev, js);
+ panfrost_devfreq_record_busy(pfdev);
job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
@@ -381,13 +380,19 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
job_read(pfdev, JS_TAIL_LO(js)),
sched_job);
- mutex_lock(&pfdev->reset_lock);
+ if (!mutex_trylock(&pfdev->reset_lock))
+ return;
- for (i = 0; i < NUM_JOB_SLOTS; i++)
- drm_sched_stop(&pfdev->js->queue[i].sched, sched_job);
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
+
+ drm_sched_stop(sched, sched_job);
+ if (js != i)
+ /* Ensure any timeouts on other slots have finished */
+ cancel_delayed_work_sync(&sched->work_tdr);
+ }
- if (sched_job)
- drm_sched_increase_karma(sched_job);
+ drm_sched_increase_karma(sched_job);
spin_lock_irqsave(&pfdev->js->job_lock, flags);
for (i = 0; i < NUM_JOB_SLOTS; i++) {
@@ -398,9 +403,7 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
}
spin_unlock_irqrestore(&pfdev->js->job_lock, flags);
- /* panfrost_core_dump(pfdev); */
-
- panfrost_devfreq_record_transition(pfdev, js);
+ panfrost_devfreq_record_idle(pfdev);
panfrost_device_reset(pfdev);
for (i = 0; i < NUM_JOB_SLOTS; i++)
@@ -463,7 +466,7 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
pfdev->jobs[j] = NULL;
panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
- panfrost_devfreq_record_transition(pfdev, j);
+ panfrost_devfreq_record_idle(pfdev);
dma_fence_signal_locked(job->done_fence);
pm_runtime_put_autosuspend(pfdev->dev);
@@ -564,14 +567,14 @@ int panfrost_job_is_idle(struct panfrost_device *pfdev)
struct panfrost_job_slot *js = pfdev->js;
int i;
+ /* Check whether the hardware is idle */
+ if (atomic_read(&pfdev->devfreq.busy_count))
+ return false;
+
for (i = 0; i < NUM_JOB_SLOTS; i++) {
/* If there are any jobs in the HW queue, we're not idle */
if (atomic_read(&js->queue[i].sched.hw_rq_count))
return false;
-
- /* Check whether the hardware is idle */
- if (pfdev->devfreq.slot[i].busy)
- return false;
}
return true;
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 024771a4083e..703ddc803c55 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -48,10 +48,10 @@ irqreturn_t pl111_irq(int irq, void *data)
}
static enum drm_mode_status
-pl111_mode_valid(struct drm_crtc *crtc,
+pl111_mode_valid(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode)
{
- struct drm_device *drm = crtc->dev;
+ struct drm_device *drm = pipe->crtc.dev;
struct pl111_drm_dev_private *priv = drm->dev_private;
u32 cpp = priv->variant->fb_bpp / 8;
u64 bw;
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index d0d691b31f4a..ca3f51c2a8fe 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -4,6 +4,7 @@ config DRM_QXL
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_TTM
+ select DRM_TTM_HELPER
select CRC32
help
QXL virtual GPU for Spice virtualization desktop integration.
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 483b4c57554a..1d601f57a6ba 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -150,15 +150,7 @@ qxl_pci_remove(struct pci_dev *pdev)
drm_dev_put(dev);
}
-static const struct file_operations qxl_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .poll = drm_poll,
- .read = drm_read,
- .mmap = qxl_mmap,
-};
+DEFINE_DRM_GEM_FOPS(qxl_fops);
static int qxl_drm_freeze(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index d4051409ce64..27e45a2d6b52 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -355,7 +355,8 @@ int qxl_mode_dumb_mmap(struct drm_file *filp,
/* qxl ttm */
int qxl_ttm_init(struct qxl_device *qdev);
void qxl_ttm_fini(struct qxl_device *qdev);
-int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
+int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem);
/* qxl image */
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index c013c516f561..ab72dc3476e9 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -54,9 +54,14 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
u32 c = 0;
- u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
+ u32 pflag = 0;
unsigned int i;
+ if (pinned)
+ pflag |= TTM_PL_FLAG_NO_EVICT;
+ if (qbo->tbo.base.size <= PAGE_SIZE)
+ pflag |= TTM_PL_FLAG_TOPDOWN;
+
qbo->placement.placement = qbo->placements;
qbo->placement.busy_placement = qbo->placements;
if (domain == QXL_GEM_DOMAIN_VRAM)
@@ -86,6 +91,7 @@ static const struct drm_gem_object_funcs qxl_object_funcs = {
.get_sg_table = qxl_gem_prime_get_sg_table,
.vmap = qxl_gem_prime_vmap,
.vunmap = qxl_gem_prime_vunmap,
+ .mmap = drm_gem_ttm_mmap,
.print_info = drm_gem_ttm_print_info,
};
@@ -161,7 +167,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, int page_offset)
{
- struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
void *rptr;
int ret;
struct io_mapping *map;
@@ -173,9 +178,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
else
goto fallback;
- (void) ttm_mem_io_lock(man, false);
- ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
- ttm_mem_io_unlock(man);
+ ret = qxl_ttm_io_mem_reserve(bo->tbo.bdev, &bo->tbo.mem);
return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
@@ -206,17 +209,11 @@ void qxl_bo_kunmap(struct qxl_bo *bo)
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, void *pmap)
{
- struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
-
if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
(bo->tbo.mem.mem_type != TTM_PL_PRIV))
goto fallback;
io_mapping_unmap_atomic(pmap);
-
- (void) ttm_mem_io_lock(man, false);
- ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
- ttm_mem_io_unlock(man);
return;
fallback:
qxl_bo_kunmap(bo);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 312216caeea2..2feca734c7b1 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -260,7 +260,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
return 0;
ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
- !no_intr, NULL, true);
+ !no_intr, NULL);
if (ret)
return ret;
@@ -429,7 +429,6 @@ void qxl_release_unmap(struct qxl_device *qdev,
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
struct ttm_buffer_object *bo;
- struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
struct ttm_validate_buffer *entry;
struct qxl_device *qdev;
@@ -451,18 +450,16 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
release->id | 0xf0000000, release->base.seqno);
trace_dma_fence_emit(&release->base);
- glob = bdev->glob;
-
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
dma_resv_add_shared_fence(bo->base.resv, &release->base);
- ttm_bo_add_to_lru(bo);
+ ttm_bo_move_to_lru_tail(bo, NULL);
dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
ww_acquire_fini(&release->ticket);
}
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index cbc6c2ba8630..16a5e903533d 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -48,47 +48,6 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
return qdev;
}
-static struct vm_operations_struct qxl_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops;
-
-static vm_fault_t qxl_ttm_fault(struct vm_fault *vmf)
-{
- struct ttm_buffer_object *bo;
- vm_fault_t ret;
-
- bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
- if (bo == NULL)
- return VM_FAULT_NOPAGE;
- ret = ttm_vm_ops->fault(vmf);
- return ret;
-}
-
-int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- int r;
- struct drm_file *file_priv = filp->private_data;
- struct qxl_device *qdev = file_priv->minor->dev->dev_private;
-
- if (qdev == NULL) {
- DRM_ERROR(
- "filp->private_data->minor->dev->dev_private == NULL\n");
- return -EINVAL;
- }
- DRM_DEBUG_DRIVER("filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
- filp->private_data, vma->vm_pgoff);
-
- r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
- if (unlikely(r != 0))
- return r;
- if (unlikely(ttm_vm_ops == NULL)) {
- ttm_vm_ops = vma->vm_ops;
- qxl_ttm_vm_ops = *ttm_vm_ops;
- qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
- }
- vma->vm_ops = &qxl_ttm_vm_ops;
- return 0;
-}
-
static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
@@ -151,16 +110,8 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
*placement = qbo->placement;
}
-static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
- struct qxl_bo *qbo = to_qxl_bo(bo);
-
- return drm_vma_node_verify_access(&qbo->tbo.base.vma_node,
- filp->private_data);
-}
-
-static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
+int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct qxl_device *qdev = qxl_get_qdev(bdev);
@@ -310,7 +261,6 @@ static struct ttm_bo_driver qxl_bo_driver = {
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &qxl_evict_flags,
.move = &qxl_bo_move,
- .verify_access = &qxl_verify_access,
.io_mem_reserve = &qxl_ttm_io_mem_reserve,
.io_mem_free = &qxl_ttm_io_mem_free,
.move_notify = &qxl_bo_move_notify,
@@ -369,14 +319,11 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- struct qxl_device *rdev = dev->dev_private;
- struct ttm_bo_global *glob = rdev->mman.bdev.glob;
struct drm_printer p = drm_seq_file_printer(m);
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
drm_mm_print(mm, &p);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index b9aea5776d3d..72db2b41e96d 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -367,10 +367,10 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
return;
sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 2994f07fbad9..ee28f5b3785e 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -233,21 +233,26 @@ drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
return &radeon_connector->mst_encoder->base;
}
+static int
+radeon_dp_mst_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct radeon_connector *radeon_connector =
+ to_radeon_connector(connector);
+ struct radeon_connector *master = radeon_connector->mst_port;
+
+ return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
+ radeon_connector->port);
+}
+
static const struct drm_connector_helper_funcs radeon_dp_mst_connector_helper_funcs = {
.get_modes = radeon_dp_mst_get_modes,
.mode_valid = radeon_dp_mst_mode_valid,
.best_encoder = radeon_mst_best_encoder,
+ .detect_ctx = radeon_dp_mst_detect,
};
-static enum drm_connector_status
-radeon_dp_mst_detect(struct drm_connector *connector, bool force)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector *master = radeon_connector->mst_port;
-
- return drm_dp_mst_detect_port(connector, &master->mst_mgr, radeon_connector->port);
-}
-
static void
radeon_dp_mst_connector_destroy(struct drm_connector *connector)
{
@@ -262,7 +267,6 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .detect = radeon_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = radeon_dp_mst_connector_destroy,
};
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 19590fff135c..888e0f384c61 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -379,19 +379,11 @@ radeon_pci_remove(struct pci_dev *pdev)
static void
radeon_pci_shutdown(struct pci_dev *pdev)
{
- struct drm_device *ddev = pci_get_drvdata(pdev);
-
/* if we are running in a VM, make sure the device
* is torn down properly on reboot/shutdown
*/
if (radeon_device_is_virtual())
radeon_pci_remove(pdev);
-
- /* Some adapters need to be suspended before a
- * shutdown occurs in order to prevent an error
- * during kexec.
- */
- radeon_suspend_kms(ddev, true, true, false);
}
static int radeon_pmops_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index b2b076606f54..67298a0739cb 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -566,7 +566,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
if (!vm_bos)
return;
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
if (r)
goto error_free;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 2abe1eab471f..140d94cc080d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -542,7 +542,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
INIT_LIST_HEAD(&duplicates);
- r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true);
+ r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
if (unlikely(r != 0)) {
return r;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 9c93eb4fad8b..f266c17b907a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -131,6 +131,35 @@ static const struct rcar_du_device_info rcar_du_r8a774a1_info = {
.dpll_mask = BIT(1),
};
+static const struct rcar_du_device_info rcar_du_r8a774b1_info = {
+ .gen = 3,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_VSP1_SOURCE
+ | RCAR_DU_FEATURE_INTERLACED
+ | RCAR_DU_FEATURE_TVM_SYNC,
+ .channels_mask = BIT(3) | BIT(1) | BIT(0),
+ .routes = {
+ /*
+ * R8A774B1 has one RGB output, one LVDS output and one HDMI
+ * output.
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(2),
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_HDMI0] = {
+ .possible_crtcs = BIT(1),
+ .port = 1,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0),
+ .port = 2,
+ },
+ },
+ .num_lvds = 1,
+ .dpll_mask = BIT(1),
+};
+
static const struct rcar_du_device_info rcar_du_r8a774c0_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
@@ -416,6 +445,7 @@ static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info },
{ .compatible = "renesas,du-r8a77470", .data = &rzg1_du_r8a77470_info },
{ .compatible = "renesas,du-r8a774a1", .data = &rcar_du_r8a774a1_info },
+ { .compatible = "renesas,du-r8a774b1", .data = &rcar_du_r8a774b1_info },
{ .compatible = "renesas,du-r8a774c0", .data = &rcar_du_r8a774c0_info },
{ .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
{ .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 2dc9caee8767..0d59f390de19 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -585,7 +585,11 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
vsps[j].crtcs_mask |= BIT(i);
- /* Store the VSP pointer and pipe index in the CRTC. */
+ /*
+ * Store the VSP pointer and pipe index in the CRTC. If the
+ * second cell of the 'vsps' specifier isn't present, default
+ * to 0 to remain compatible with older DT bindings.
+ */
rcdu->crtcs[i].vsp = &rcdu->vsps[j];
rcdu->crtcs[i].vsp_pipe = cells >= 1 ? args.args[0] : 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 3fc7e6899cab..8c6c172bbf2e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -16,6 +16,7 @@
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/sys_soc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -842,8 +843,23 @@ static int rcar_lvds_get_clocks(struct rcar_lvds *lvds)
return 0;
}
+static const struct rcar_lvds_device_info rcar_lvds_r8a7790es1_info = {
+ .gen = 2,
+ .quirks = RCAR_LVDS_QUIRK_LANES,
+ .pll_setup = rcar_lvds_pll_setup_gen2,
+};
+
+static const struct soc_device_attribute lvds_quirk_matches[] = {
+ {
+ .soc_id = "r8a7790", .revision = "ES1.*",
+ .data = &rcar_lvds_r8a7790es1_info,
+ },
+ { /* sentinel */ }
+};
+
static int rcar_lvds_probe(struct platform_device *pdev)
{
+ const struct soc_device_attribute *attr;
struct rcar_lvds *lvds;
struct resource *mem;
int ret;
@@ -857,6 +873,10 @@ static int rcar_lvds_probe(struct platform_device *pdev)
lvds->dev = &pdev->dev;
lvds->info = of_device_get_match_data(&pdev->dev);
+ attr = soc_device_match(lvds_quirk_matches);
+ if (attr)
+ lvds->info = attr->data;
+
ret = rcar_lvds_parse_dt(lvds);
if (ret < 0)
return ret;
@@ -893,12 +913,6 @@ static const struct rcar_lvds_device_info rcar_lvds_gen2_info = {
.pll_setup = rcar_lvds_pll_setup_gen2,
};
-static const struct rcar_lvds_device_info rcar_lvds_r8a7790_info = {
- .gen = 2,
- .quirks = RCAR_LVDS_QUIRK_LANES,
- .pll_setup = rcar_lvds_pll_setup_gen2,
-};
-
static const struct rcar_lvds_device_info rcar_lvds_gen3_info = {
.gen = 3,
.quirks = RCAR_LVDS_QUIRK_PWD,
@@ -929,8 +943,9 @@ static const struct of_device_id rcar_lvds_of_table[] = {
{ .compatible = "renesas,r8a7743-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7744-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a774a1-lvds", .data = &rcar_lvds_gen3_info },
+ { .compatible = "renesas,r8a774b1-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a774c0-lvds", .data = &rcar_lvds_r8a77990_info },
- { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_r8a7790_info },
+ { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info },
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index d505ea7d5384..eed594bd38d3 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -477,8 +477,8 @@ static int cdn_dp_disable(struct cdn_dp_device *dp)
cdn_dp_set_firmware_active(dp, false);
cdn_dp_clk_disable(dp);
dp->active = false;
- dp->link.rate = 0;
- dp->link.num_lanes = 0;
+ dp->max_lanes = 0;
+ dp->max_rate = 0;
if (!dp->connected) {
kfree(dp->edid);
dp->edid = NULL;
@@ -570,7 +570,7 @@ static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
struct cdn_dp_port *port = cdn_dp_connected_port(dp);
u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);
- if (!port || !dp->link.rate || !dp->link.num_lanes)
+ if (!port || !dp->max_rate || !dp->max_lanes)
return false;
if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
@@ -952,8 +952,8 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
/* Enabled and connected with a sink, re-train if requested */
} else if (!cdn_dp_check_link_status(dp)) {
- unsigned int rate = dp->link.rate;
- unsigned int lanes = dp->link.num_lanes;
+ unsigned int rate = dp->max_rate;
+ unsigned int lanes = dp->max_lanes;
struct drm_display_mode *mode = &dp->mode;
DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
@@ -966,7 +966,7 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
/* If training result is changed, update the video config */
if (mode->clock &&
- (rate != dp->link.rate || lanes != dp->link.num_lanes)) {
+ (rate != dp->max_rate || lanes != dp->max_lanes)) {
ret = cdn_dp_config_video(dp);
if (ret) {
dp->connected = false;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index b85ea89eb60b..83c4586665b4 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -92,9 +92,10 @@ struct cdn_dp_device {
struct reset_control *core_rst;
struct audio_info audio_info;
struct video_info video_info;
- struct drm_dp_link link;
struct cdn_dp_port *port[MAX_PHY];
u8 ports;
+ u8 max_lanes;
+ u8 max_rate;
u8 lanes;
int active_port;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 077c87021908..7361c07cb4a7 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -535,8 +535,8 @@ static int cdn_dp_get_training_status(struct cdn_dp_device *dp)
if (ret)
goto err_get_training_status;
- dp->link.rate = drm_dp_bw_code_to_link_rate(status[0]);
- dp->link.num_lanes = status[1];
+ dp->max_rate = drm_dp_bw_code_to_link_rate(status[0]);
+ dp->max_lanes = status[1];
err_get_training_status:
if (ret)
@@ -560,8 +560,8 @@ int cdn_dp_train_link(struct cdn_dp_device *dp)
return ret;
}
- DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->link.rate,
- dp->link.num_lanes);
+ DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->max_rate,
+ dp->max_lanes);
return ret;
}
@@ -639,7 +639,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
bit_per_pix = (video->color_fmt == YCBCR_4_2_2) ?
(video->color_depth * 2) : (video->color_depth * 3);
- link_rate = dp->link.rate / 1000;
+ link_rate = dp->max_rate / 1000;
ret = cdn_dp_reg_write(dp, BND_HSYNC2VSYNC, VIF_BYPASS_INTERLACE);
if (ret)
@@ -659,14 +659,13 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
do {
tu_size_reg += 2;
symbol = tu_size_reg * mode->clock * bit_per_pix;
- do_div(symbol, dp->link.num_lanes * link_rate * 8);
+ do_div(symbol, dp->max_lanes * link_rate * 8);
rem = do_div(symbol, 1000);
if (tu_size_reg > 64) {
ret = -EINVAL;
DRM_DEV_ERROR(dp->dev,
"tu error, clk:%d, lanes:%d, rate:%d\n",
- mode->clock, dp->link.num_lanes,
- link_rate);
+ mode->clock, dp->max_lanes, link_rate);
goto err_config_video;
}
} while ((symbol <= 1) || (tu_size_reg - symbol < 4) ||
@@ -680,7 +679,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
/* set the FIFO Buffer size */
val = div_u64(mode->clock * (symbol + 1), 1000) + link_rate;
- val /= (dp->link.num_lanes * link_rate);
+ val /= (dp->max_lanes * link_rate);
val = div_u64(8 * (symbol + 1), bit_per_pix) - val;
val += 2;
ret = cdn_dp_reg_write(dp, DP_VC_TABLE(15), val);
@@ -833,7 +832,7 @@ static void cdn_dp_audio_config_i2s(struct cdn_dp_device *dp,
u32 val;
if (audio->channels == 2) {
- if (dp->link.num_lanes == 1)
+ if (dp->max_lanes == 1)
sub_pckt_num = 2;
else
sub_pckt_num = 4;
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 906891b03a38..7f56d8c3491d 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -450,6 +450,7 @@ static const struct dw_hdmi_plat_data rk3328_hdmi_drv_data = {
.phy_ops = &rk3328_hdmi_phy_ops,
.phy_name = "inno_dw_hdmi_phy2",
.phy_force_vendor = true,
+ .use_drm_infoframe = true,
};
static struct rockchip_hdmi_chip_data rk3399_chip_data = {
@@ -464,6 +465,7 @@ static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = {
.cur_ctr = rockchip_cur_ctr,
.phy_config = rockchip_phy_config,
.phy_data = &rk3399_chip_data,
+ .use_drm_infoframe = true,
};
static const struct of_device_id dw_hdmi_rockchip_dt_ids[] = {
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index 85fc5f01f761..cdb401f4283d 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -743,7 +743,6 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
struct rk3066_hdmi *hdmi;
- struct resource *iores;
int irq;
int ret;
@@ -753,12 +752,7 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
hdmi->dev = dev;
hdmi->drm_dev = drm;
-
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iores)
- return -ENXIO;
-
- hdmi->regs = devm_ioremap_resource(dev, iores);
+ hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdmi->regs))
return PTR_ERR(hdmi->regs);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 291e89b4045f..7582d0e6a60a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -294,7 +294,7 @@ static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
kfree(rk_obj);
}
-struct rockchip_gem_object *
+static struct rockchip_gem_object *
rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
struct rockchip_gem_object *rk_obj;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 613404f86668..d04b3492bdac 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -139,6 +139,7 @@ struct vop {
uint32_t *regsbak;
void __iomem *regs;
+ void __iomem *lut_regs;
/* physical map length of vop register */
uint32_t len;
@@ -1040,14 +1041,118 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct vop *vop = to_vop(crtc);
+ unsigned long rate;
- adjusted_mode->clock =
- DIV_ROUND_UP(clk_round_rate(vop->dclk,
- adjusted_mode->clock * 1000), 1000);
+ /*
+ * Clock craziness.
+ *
+ * Key points:
+ *
+ * - DRM works in kHz.
+ * - Clock framework works in Hz.
+ * - Rockchip's clock driver picks the clock rate that is the
+ * same _OR LOWER_ than the one requested.
+ *
+ * Action plan:
+ *
+ * 1. When DRM gives us a mode, we should add 999 Hz to it. That way
+ * if the clock we need is 60000001 Hz (~60 MHz) and DRM tells us to
+ * make 60000 kHz then the clock framework will actually give us
+ * the right clock.
+ *
+ * NOTE: if the PLL (maybe through a divider) could actually make
+ * a clock rate 999 Hz higher instead of the one we want, then this
+ * could be a problem. Unfortunately there's not much we can do
+ * since it's baked into DRM to use kHz. It shouldn't matter in
+ * practice since Rockchip PLLs are controlled by tables and
+ * even if there is a divider in the middle I wouldn't expect PLL
+ * rates in the table that are just a few kHz different.
+ *
+ * 2. Get the clock framework to round the rate for us to tell us
+ * what it will actually make.
+ *
+ * 3. Store the rounded up rate so that we don't need to worry about
+ * this in the actual clk_set_rate().
+ */
+ rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000 + 999);
+ adjusted_mode->clock = DIV_ROUND_UP(rate, 1000);
return true;
}
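A worked instance of the rounding recipe in the comment above, as a standalone sketch; fake_clk_round_rate() is a hypothetical stand-in for a Rockchip PLL table whose closest achievable rates are 59000000 and 60000001 Hz:

#include <stdio.h>

/* Hypothetical clock driver: returns the closest supported rate that
 * is the same or lower than the request, per the comment above. */
static long fake_clk_round_rate(long hz)
{
	return hz >= 60000001 ? 60000001 : 59000000;
}

int main(void)
{
	int mode_clock_khz = 60000; /* what DRM hands us, in kHz */
	long rate = fake_clk_round_rate(mode_clock_khz * 1000L + 999);

	/* Without the +999 we would ask for 60000000 Hz and get 59000000;
	 * with it we ask for 60000999 Hz and get 60000001. DIV_ROUND_UP
	 * then stores 60001 kHz back into the adjusted mode. */
	printf("%ld Hz -> %ld kHz\n", rate, (rate + 999) / 1000);
	return 0;
}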
+static bool vop_dsp_lut_is_enabled(struct vop *vop)
+{
+ return vop_read_reg(vop, 0, &vop->data->common->dsp_lut_en);
+}
+
+static void vop_crtc_write_gamma_lut(struct vop *vop, struct drm_crtc *crtc)
+{
+ struct drm_color_lut *lut = crtc->state->gamma_lut->data;
+ unsigned int i;
+
+ for (i = 0; i < crtc->gamma_size; i++) {
+ u32 word;
+
+ word = (drm_color_lut_extract(lut[i].red, 10) << 20) |
+ (drm_color_lut_extract(lut[i].green, 10) << 10) |
+ drm_color_lut_extract(lut[i].blue, 10);
+ writel(word, vop->lut_regs + i * 4);
+ }
+}
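The write above packs three 10-bit channels into bits 29:20, 19:10 and 9:0 of each LUT word. A standalone sketch of that packing, using a plain top-10-bits truncation as an approximation of drm_color_lut_extract() (the kernel helper also rounds):

#include <stdint.h>
#include <stdio.h>

/* Approximation of drm_color_lut_extract(v, 10): keep the 10 MSBs. */
static uint32_t extract10(uint16_t v)
{
	return v >> 6;
}

int main(void)
{
	uint16_t red = 0xffff, green = 0x8000, blue = 0x0000;
	uint32_t word = (extract10(red) << 20) |
			(extract10(green) << 10) |
			extract10(blue);

	/* red = 0x3ff in bits 29:20, green = 0x200 in bits 19:10 */
	printf("LUT word: 0x%08x\n", word); /* 0x3ff80000 */
	return 0;
}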
+
+static void vop_crtc_gamma_set(struct vop *vop, struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct drm_crtc_state *state = crtc->state;
+ unsigned int idle;
+ int ret;
+
+ if (!vop->lut_regs)
+ return;
+ /*
+ * To disable gamma (gamma_lut is null) or to write
+ * an update to the LUT, clear dsp_lut_en.
+ */
+ spin_lock(&vop->reg_lock);
+ VOP_REG_SET(vop, common, dsp_lut_en, 0);
+ vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
+
+ /*
+ * In order to write the LUT to the internal memory,
+ * we need to first make sure the dsp_lut_en bit is cleared.
+ */
+ ret = readx_poll_timeout(vop_dsp_lut_is_enabled, vop,
+ idle, !idle, 5, 30 * 1000);
+ if (ret) {
+ DRM_DEV_ERROR(vop->dev, "display LUT RAM enable timeout!\n");
+ return;
+ }
+
+ if (!state->gamma_lut)
+ return;
+
+ spin_lock(&vop->reg_lock);
+ vop_crtc_write_gamma_lut(vop, crtc);
+ VOP_REG_SET(vop, common, dsp_lut_en, 1);
+ vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
+}
+
+static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct vop *vop = to_vop(crtc);
+
+ /*
+ * Only update GAMMA if the 'active' flag is not changed,
+ * otherwise it's updated by .atomic_enable.
+ */
+ if (crtc->state->color_mgmt_changed &&
+ !crtc->state->active_changed)
+ vop_crtc_gamma_set(vop, crtc, old_crtc_state);
+}
+
static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -1075,6 +1180,14 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
return;
}
+ /*
+ * If we have a GAMMA LUT in the state, then let's make sure
+ * it's updated. We might be coming out of suspend,
+ * which means the LUT internal memory needs to be re-written.
+ */
+ if (crtc->state->gamma_lut)
+ vop_crtc_gamma_set(vop, crtc, old_state);
+
mutex_lock(&vop->vop_lock);
WARN_ON(vop->event);
@@ -1085,9 +1198,7 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
return;
}
-
- pin_pol = BIT(DCLK_INVERT);
- pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ?
+ pin_pol = (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ?
BIT(HSYNC_POSITIVE) : 0;
pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ?
BIT(VSYNC_POSITIVE) : 0;
@@ -1096,25 +1207,29 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
switch (s->output_type) {
case DRM_MODE_CONNECTOR_LVDS:
- VOP_REG_SET(vop, output, rgb_en, 1);
+ VOP_REG_SET(vop, output, rgb_dclk_pol, 1);
VOP_REG_SET(vop, output, rgb_pin_pol, pin_pol);
+ VOP_REG_SET(vop, output, rgb_en, 1);
break;
case DRM_MODE_CONNECTOR_eDP:
+ VOP_REG_SET(vop, output, edp_dclk_pol, 1);
VOP_REG_SET(vop, output, edp_pin_pol, pin_pol);
VOP_REG_SET(vop, output, edp_en, 1);
break;
case DRM_MODE_CONNECTOR_HDMIA:
+ VOP_REG_SET(vop, output, hdmi_dclk_pol, 1);
VOP_REG_SET(vop, output, hdmi_pin_pol, pin_pol);
VOP_REG_SET(vop, output, hdmi_en, 1);
break;
case DRM_MODE_CONNECTOR_DSI:
+ VOP_REG_SET(vop, output, mipi_dclk_pol, 1);
VOP_REG_SET(vop, output, mipi_pin_pol, pin_pol);
VOP_REG_SET(vop, output, mipi_en, 1);
VOP_REG_SET(vop, output, mipi_dual_channel_en,
!!(s->output_flags & ROCKCHIP_OUTPUT_DSI_DUAL));
break;
case DRM_MODE_CONNECTOR_DisplayPort:
- pin_pol &= ~BIT(DCLK_INVERT);
+ VOP_REG_SET(vop, output, dp_dclk_pol, 0);
VOP_REG_SET(vop, output, dp_pin_pol, pin_pol);
VOP_REG_SET(vop, output, dp_en, 1);
break;
@@ -1191,6 +1306,26 @@ static void vop_wait_for_irq_handler(struct vop *vop)
synchronize_irq(vop->irq);
}
+static int vop_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ struct vop *vop = to_vop(crtc);
+
+ if (vop->lut_regs && crtc_state->color_mgmt_changed &&
+ crtc_state->gamma_lut) {
+ unsigned int len;
+
+ len = drm_color_lut_size(crtc_state->gamma_lut);
+ if (len != crtc->gamma_size) {
+ DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n",
+ len, crtc->gamma_size);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
@@ -1243,6 +1378,8 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
.mode_fixup = vop_crtc_mode_fixup,
+ .atomic_check = vop_crtc_atomic_check,
+ .atomic_begin = vop_crtc_atomic_begin,
.atomic_flush = vop_crtc_atomic_flush,
.atomic_enable = vop_crtc_atomic_enable,
.atomic_disable = vop_crtc_atomic_disable,
@@ -1361,6 +1498,7 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
.disable_vblank = vop_crtc_disable_vblank,
.set_crc_source = vop_crtc_set_crc_source,
.verify_crc_source = vop_crtc_verify_crc_source,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
@@ -1518,6 +1656,10 @@ static int vop_create_crtc(struct vop *vop)
goto err_cleanup_planes;
drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
+ if (vop->lut_regs) {
+ drm_mode_crtc_set_gamma_size(crtc, vop_data->lut_size);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, vop_data->lut_size);
+ }
/*
* Create drm_planes for overlay windows with possible_crtcs restricted
@@ -1822,6 +1964,17 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(vop->regs))
return PTR_ERR(vop->regs);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ if (!vop_data->lut_size) {
+ DRM_DEV_ERROR(dev, "no gamma LUT size defined\n");
+ return -EINVAL;
+ }
+ vop->lut_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vop->lut_regs))
+ return PTR_ERR(vop->lut_regs);
+ }
+
vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
if (!vop->regsbak)
return -ENOMEM;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 2149a889c29d..0b3d18c457b2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -46,10 +46,15 @@ struct vop_modeset {
struct vop_output {
struct vop_reg pin_pol;
struct vop_reg dp_pin_pol;
+ struct vop_reg dp_dclk_pol;
struct vop_reg edp_pin_pol;
+ struct vop_reg edp_dclk_pol;
struct vop_reg hdmi_pin_pol;
+ struct vop_reg hdmi_dclk_pol;
struct vop_reg mipi_pin_pol;
+ struct vop_reg mipi_dclk_pol;
struct vop_reg rgb_pin_pol;
+ struct vop_reg rgb_dclk_pol;
struct vop_reg dp_en;
struct vop_reg edp_en;
struct vop_reg hdmi_en;
@@ -67,6 +72,7 @@ struct vop_common {
struct vop_reg dither_down_mode;
struct vop_reg dither_down_en;
struct vop_reg dither_up;
+ struct vop_reg dsp_lut_en;
struct vop_reg gate_en;
struct vop_reg mmu_en;
struct vop_reg out_mode;
@@ -170,6 +176,7 @@ struct vop_data {
const struct vop_win_yuv2yuv_data *win_yuv2yuv;
const struct vop_win_data *win;
unsigned int win_size;
+ unsigned int lut_size;
#define VOP_FEATURE_OUTPUT_RGB10 BIT(0)
#define VOP_FEATURE_INTERNAL_RGB BIT(1)
@@ -294,8 +301,7 @@ enum dither_down_mode_sel {
enum vop_pol {
HSYNC_POSITIVE = 0,
VSYNC_POSITIVE = 1,
- DEN_NEGATIVE = 2,
- DCLK_INVERT = 3
+ DEN_NEGATIVE = 2
};
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index d1494be14471..7a9d979c8d5d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -16,6 +16,7 @@
#include "rockchip_drm_vop.h"
#include "rockchip_vop_reg.h"
+#include "rockchip_drm_drv.h"
#define _VOP_REG(off, _mask, _shift, _write_mask, _relaxed) \
{ \
@@ -214,9 +215,11 @@ static const struct vop_modeset px30_modeset = {
};
static const struct vop_output px30_output = {
- .rgb_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0xf, 1),
- .mipi_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0xf, 25),
+ .rgb_dclk_pol = VOP_REG(PX30_DSP_CTRL0, 0x1, 1),
+ .rgb_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0x7, 2),
.rgb_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 0),
+ .mipi_dclk_pol = VOP_REG(PX30_DSP_CTRL0, 0x1, 25),
+ .mipi_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0x7, 26),
.mipi_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 24),
};
@@ -598,6 +601,7 @@ static const struct vop_common rk3288_common = {
.dither_down_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 2),
.pre_dither_down = VOP_REG(RK3288_DSP_CTRL1, 0x1, 1),
.dither_up = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6),
+ .dsp_lut_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 0),
.data_blank = VOP_REG(RK3288_DSP_CTRL0, 0x1, 19),
.dsp_blank = VOP_REG(RK3288_DSP_CTRL0, 0x3, 18),
.out_mode = VOP_REG(RK3288_DSP_CTRL0, 0xf, 0),
@@ -646,6 +650,7 @@ static const struct vop_data rk3288_vop = {
.output = &rk3288_output,
.win = rk3288_vop_win_data,
.win_size = ARRAY_SIZE(rk3288_vop_win_data),
+ .lut_size = 1024,
};
static const int rk3368_vop_intrs[] = {
@@ -717,10 +722,14 @@ static const struct vop_win_data rk3368_vop_win_data[] = {
};
static const struct vop_output rk3368_output = {
- .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 16),
- .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 20),
- .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 24),
- .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 28),
+ .rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19),
+ .hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23),
+ .edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27),
+ .mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31),
+ .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16),
+ .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20),
+ .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24),
+ .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28),
.rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
.hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
.edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
@@ -764,11 +773,16 @@ static const struct vop_data rk3366_vop = {
};
static const struct vop_output rk3399_output = {
- .dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16),
- .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 16),
- .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 20),
- .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 24),
- .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 28),
+ .dp_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 19),
+ .rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19),
+ .hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23),
+ .edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27),
+ .mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31),
+ .dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 16),
+ .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16),
+ .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20),
+ .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24),
+ .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28),
.dp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 11),
.rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
.hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
@@ -872,14 +886,18 @@ static const struct vop_modeset rk3328_modeset = {
};
static const struct vop_output rk3328_output = {
+ .rgb_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 19),
+ .hdmi_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 23),
+ .edp_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 27),
+ .mipi_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 31),
.rgb_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 12),
.hdmi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 13),
.edp_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 14),
.mipi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 15),
- .rgb_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 16),
- .hdmi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 20),
- .edp_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 24),
- .mipi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 28),
+ .rgb_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 16),
+ .hdmi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 20),
+ .edp_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 24),
+ .mipi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 28),
};
static const struct vop_misc rk3328_misc = {
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 54977408f574..8b45c3a1b84e 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -128,13 +128,13 @@ static void drm_sched_fence_release_finished(struct dma_fence *f)
dma_fence_put(&fence->scheduled);
}
-const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
+static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
.get_driver_name = drm_sched_fence_get_driver_name,
.get_timeline_name = drm_sched_fence_get_timeline_name,
.release = drm_sched_fence_release_scheduled,
};
-const struct dma_fence_ops drm_sched_fence_ops_finished = {
+static const struct dma_fence_ops drm_sched_fence_ops_finished = {
.get_driver_name = drm_sched_fence_get_driver_name,
.get_timeline_name = drm_sched_fence_get_timeline_name,
.release = drm_sched_fence_release_finished,
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 9a0ee74d82dc..d4cc7289147e 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -622,43 +622,41 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
}
/**
- * drm_sched_cleanup_jobs - destroy finished jobs
+ * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
*
* @sched: scheduler instance
*
- * Remove all finished jobs from the mirror list and destroy them.
+ * Returns the next finished job from the mirror list (if there is one)
+ * ready to be destroyed.
*/
-static void drm_sched_cleanup_jobs(struct drm_gpu_scheduler *sched)
+static struct drm_sched_job *
+drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
+ struct drm_sched_job *job;
unsigned long flags;
/* Don't destroy jobs while the timeout worker is running */
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
!cancel_delayed_work(&sched->work_tdr))
- return;
-
+ return NULL;
- while (!list_empty(&sched->ring_mirror_list)) {
- struct drm_sched_job *job;
+ spin_lock_irqsave(&sched->job_list_lock, flags);
- job = list_first_entry(&sched->ring_mirror_list,
+ job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
- if (!dma_fence_is_signaled(&job->s_fence->finished))
- break;
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
/* remove job from ring_mirror_list */
list_del_init(&job->node);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
-
- sched->ops->free_job(job);
+ } else {
+ job = NULL;
+ /* queue timeout for next job */
+ drm_sched_start_timeout(sched);
}
- /* queue timeout for next job */
- spin_lock_irqsave(&sched->job_list_lock, flags);
- drm_sched_start_timeout(sched);
spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ return job;
}
/**
@@ -698,12 +696,19 @@ static int drm_sched_main(void *param)
struct drm_sched_fence *s_fence;
struct drm_sched_job *sched_job;
struct dma_fence *fence;
+ struct drm_sched_job *cleanup_job = NULL;
wait_event_interruptible(sched->wake_up_worker,
- (drm_sched_cleanup_jobs(sched),
+ (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
(!drm_sched_blocked(sched) &&
(entity = drm_sched_select_entity(sched))) ||
- kthread_should_stop()));
+ kthread_should_stop());
+
+ if (cleanup_job) {
+ sched->ops->free_job(cleanup_job);
+ /* queue timeout for next job */
+ drm_sched_start_timeout(sched);
+ }
if (!entity)
continue;
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index a44dca4b0219..e8a317d5ba19 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -226,6 +226,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
sun8i_hdmi_phy_init(hdmi->phy);
plat_data->mode_valid = hdmi->quirks->mode_valid;
+ plat_data->use_drm_infoframe = hdmi->quirks->use_drm_infoframe;
sun8i_hdmi_phy_set_ops(hdmi->phy, plat_data);
platform_set_drvdata(pdev, hdmi);
@@ -300,6 +301,7 @@ static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = {
static const struct sun8i_dw_hdmi_quirks sun50i_h6_quirks = {
.mode_valid = sun8i_dw_hdmi_mode_valid_h6,
+ .use_drm_infoframe = true,
};
static const struct of_device_id sun8i_dw_hdmi_dt_ids[] = {
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index d707c9171824..8e64945167e9 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -179,6 +179,7 @@ struct sun8i_dw_hdmi_quirks {
enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
const struct drm_display_mode *mode);
unsigned int set_rate : 1;
+ unsigned int use_drm_infoframe : 1;
};
struct sun8i_dw_hdmi {
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 1d1269fde3c1..5043dcaf1cf9 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -9,7 +9,7 @@ config DRM_TEGRA
select DRM_MIPI_DSI
select DRM_PANEL
select TEGRA_HOST1X
- select IOMMU_IOVA if IOMMU_SUPPORT
+ select IOMMU_IOVA
select CEC_CORE if CEC_NOTIFIER
help
Choose this option if you have an NVIDIA Tegra SoC.
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index 33c463e8d49f..d6cf202414f0 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -5,6 +5,7 @@ tegra-drm-y := \
drm.o \
gem.o \
fb.o \
+ dp.o \
hub.o \
plane.o \
dc.o \
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index fbf57bc3cdab..5b1f9ff97576 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -715,9 +715,7 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
window.swap = state->swap;
for (i = 0; i < fb->format->num_planes; i++) {
- struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
-
- window.base[i] = bo->paddr + fb->offsets[i];
+ window.base[i] = state->iova[i] + fb->offsets[i];
/*
* Tegra uses a shared stride for UV planes. Framebuffers are
@@ -732,6 +730,8 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs tegra_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_plane_atomic_check,
.atomic_disable = tegra_plane_atomic_disable,
.atomic_update = tegra_plane_atomic_update,
@@ -869,11 +869,11 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
return;
}
- value |= (bo->paddr >> 10) & 0x3fffff;
+ value |= (bo->iova >> 10) & 0x3fffff;
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- value = (bo->paddr >> 32) & 0x3;
+ value = (bo->iova >> 32) & 0x3;
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI);
#endif
@@ -914,6 +914,8 @@ static void tegra_cursor_atomic_disable(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_cursor_atomic_check,
.atomic_update = tegra_cursor_atomic_update,
.atomic_disable = tegra_cursor_atomic_disable,
@@ -2014,9 +2016,8 @@ static int tegra_dc_init(struct host1x_client *client)
if (!dc->syncpt)
dev_warn(dc->dev, "failed to allocate syncpoint\n");
- dc->group = host1x_client_iommu_attach(client, true);
- if (IS_ERR(dc->group)) {
- err = PTR_ERR(dc->group);
+ err = host1x_client_iommu_attach(client);
+ if (err < 0) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
return err;
}
@@ -2074,6 +2075,12 @@ static int tegra_dc_init(struct host1x_client *client)
goto cleanup;
}
+ /*
+ * Inherit the DMA parameters (such as maximum segment size) from the
+ * parent device.
+ */
+ client->dev->dma_parms = client->parent->dma_parms;
+
return 0;
cleanup:
@@ -2083,7 +2090,7 @@ cleanup:
if (!IS_ERR(primary))
drm_plane_cleanup(primary);
- host1x_client_iommu_detach(client, dc->group);
+ host1x_client_iommu_detach(client);
host1x_syncpt_free(dc->syncpt);
return err;
@@ -2097,6 +2104,9 @@ static int tegra_dc_exit(struct host1x_client *client)
if (!tegra_dc_has_window_groups(dc))
return 0;
+ /* avoid a dangling pointer just in case this disappears */
+ client->dev->dma_parms = NULL;
+
devm_free_irq(dc->dev, dc->irq, dc);
err = tegra_dc_rgb_exit(dc);
@@ -2105,7 +2115,7 @@ static int tegra_dc_exit(struct host1x_client *client)
return err;
}
- host1x_client_iommu_detach(client, dc->group);
+ host1x_client_iommu_detach(client);
host1x_syncpt_free(dc->syncpt);
return 0;
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 0c4d17851f47..3d8ddccd758f 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -90,8 +90,6 @@ struct tegra_dc {
struct drm_info_list *debugfs_files;
const struct tegra_dc_soc_info *soc;
-
- struct iommu_group *group;
};
static inline struct tegra_dc *
diff --git a/drivers/gpu/drm/tegra/dp.c b/drivers/gpu/drm/tegra/dp.c
new file mode 100644
index 000000000000..70dfb7d1dec5
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dp.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2013-2019 NVIDIA Corporation
+ * Copyright (C) 2015 Rob Clark
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_print.h>
+
+#include "dp.h"
+
+static const u8 drm_dp_edp_revisions[] = { 0x11, 0x12, 0x13, 0x14 };
+
+static void drm_dp_link_caps_reset(struct drm_dp_link_caps *caps)
+{
+ caps->enhanced_framing = false;
+ caps->tps3_supported = false;
+ caps->fast_training = false;
+ caps->channel_coding = false;
+ caps->alternate_scrambler_reset = false;
+}
+
+void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest,
+ const struct drm_dp_link_caps *src)
+{
+ dest->enhanced_framing = src->enhanced_framing;
+ dest->tps3_supported = src->tps3_supported;
+ dest->fast_training = src->fast_training;
+ dest->channel_coding = src->channel_coding;
+ dest->alternate_scrambler_reset = src->alternate_scrambler_reset;
+}
+
+static void drm_dp_link_reset(struct drm_dp_link *link)
+{
+ unsigned int i;
+
+ if (!link)
+ return;
+
+ link->revision = 0;
+ link->max_rate = 0;
+ link->max_lanes = 0;
+
+ drm_dp_link_caps_reset(&link->caps);
+ link->aux_rd_interval.cr = 0;
+ link->aux_rd_interval.ce = 0;
+ link->edp = 0;
+
+ link->rate = 0;
+ link->lanes = 0;
+
+ for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++)
+ link->rates[i] = 0;
+
+ link->num_rates = 0;
+}
+
+/**
+ * drm_dp_link_add_rate() - add a rate to the list of supported rates
+ * @link: the link to add the rate to
+ * @rate: the rate to add
+ *
+ * Add a link rate to the list of supported link rates.
+ *
+ * Returns:
+ * 0 on success or one of the following negative error codes on failure:
+ * - ENOSPC if the maximum number of supported rates has been reached
+ * - EEXIST if the link already supports this rate
+ *
+ * See also:
+ * drm_dp_link_remove_rate()
+ */
+int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate)
+{
+ unsigned int i, pivot;
+
+ if (link->num_rates == DP_MAX_SUPPORTED_RATES)
+ return -ENOSPC;
+
+ for (pivot = 0; pivot < link->num_rates; pivot++)
+ if (rate <= link->rates[pivot])
+ break;
+
+ if (pivot != link->num_rates && rate == link->rates[pivot])
+ return -EEXIST;
+
+ for (i = link->num_rates; i > pivot; i--)
+ link->rates[i] = link->rates[i - 1];
+
+ link->rates[pivot] = rate;
+ link->num_rates++;
+
+ return 0;
+}
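The insertion above keeps the rates array sorted in ascending order and rejects duplicates. A small standalone re-implementation of the same logic (fixed 8-entry table, hypothetical add_rate() name) to show the behaviour:

#include <errno.h>
#include <stdio.h>

#define MAX_RATES 8

static unsigned long rates[MAX_RATES];
static unsigned int num_rates;

static int add_rate(unsigned long rate)
{
	unsigned int i, pivot;

	if (num_rates == MAX_RATES)
		return -ENOSPC;

	/* Find the first entry that is >= the new rate. */
	for (pivot = 0; pivot < num_rates; pivot++)
		if (rate <= rates[pivot])
			break;

	if (pivot != num_rates && rate == rates[pivot])
		return -EEXIST;

	/* Shift the tail up and insert in sorted position. */
	for (i = num_rates; i > pivot; i--)
		rates[i] = rates[i - 1];

	rates[pivot] = rate;
	num_rates++;
	return 0;
}

int main(void)
{
	add_rate(270000);
	add_rate(162000);
	printf("duplicate -> %d\n", add_rate(270000)); /* -EEXIST */
	printf("sorted: %lu %lu\n", rates[0], rates[1]); /* 162000 270000 */
	return 0;
}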
+
+/**
+ * drm_dp_link_remove_rate() - remove a rate from the list of supported rates
+ * @link: the link from which to remove the rate
+ * @rate: the rate to remove
+ *
+ * Removes a link rate from the list of supported link rates.
+ *
+ * Returns:
+ * 0 on success or one of the following negative error codes on failure:
+ * - EINVAL if the specified rate is not among the supported rates
+ *
+ * See also:
+ * drm_dp_link_add_rate()
+ */
+int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < link->num_rates; i++)
+ if (rate == link->rates[i])
+ break;
+
+ if (i == link->num_rates)
+ return -EINVAL;
+
+ link->num_rates--;
+
+ while (i < link->num_rates) {
+ link->rates[i] = link->rates[i + 1];
+ i++;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_update_rates() - normalize the supported link rates array
+ * @link: the link for which to normalize the supported link rates
+ *
+ * Users should call this function after they've manually modified the array
+ * of supported link rates. This function removes any stale entries, compacts
+ * the array and updates the supported link rate count. Note that calling the
+ * drm_dp_link_remove_rate() function already does this janitorial work.
+ *
+ * See also:
+ * drm_dp_link_add_rate(), drm_dp_link_remove_rate()
+ */
+void drm_dp_link_update_rates(struct drm_dp_link *link)
+{
+ unsigned int i, count = 0;
+
+ for (i = 0; i < link->num_rates; i++) {
+ if (link->rates[i] != 0)
+ link->rates[count++] = link->rates[i];
+ }
+
+ for (i = count; i < link->num_rates; i++)
+ link->rates[i] = 0;
+
+ link->num_rates = count;
+}
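For example, after an entry has been zeroed by hand, e.g. {162000, 0, 270000} with num_rates = 3, the normalization above compacts the table to {162000, 270000} with num_rates = 2. A minimal standalone check of the compaction loop:

#include <stdio.h>

int main(void)
{
	unsigned long rates[3] = { 162000, 0, 270000 };
	unsigned int num_rates = 3, i, count = 0;

	/* Same compaction as drm_dp_link_update_rates() above. */
	for (i = 0; i < num_rates; i++)
		if (rates[i] != 0)
			rates[count++] = rates[i];
	for (i = count; i < num_rates; i++)
		rates[i] = 0;
	num_rates = count;

	printf("num_rates=%u: %lu %lu\n", num_rates, rates[0], rates[1]);
	return 0;
}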
+
+/**
+ * drm_dp_link_probe() - probe a DisplayPort link for capabilities
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to structure in which to return link capabilities
+ *
+ * The structure filled in by this function can usually be passed directly
+ * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and
+ * configure the link based on the link's capabilities.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 dpcd[DP_RECEIVER_CAP_SIZE], value;
+ unsigned int rd_interval;
+ int err;
+
+ drm_dp_link_reset(link);
+
+ err = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
+ if (err < 0)
+ return err;
+
+ link->revision = dpcd[DP_DPCD_REV];
+ link->max_rate = drm_dp_max_link_rate(dpcd);
+ link->max_lanes = drm_dp_max_lane_count(dpcd);
+
+ link->caps.enhanced_framing = drm_dp_enhanced_frame_cap(dpcd);
+ link->caps.tps3_supported = drm_dp_tps3_supported(dpcd);
+ link->caps.fast_training = drm_dp_fast_training_cap(dpcd);
+ link->caps.channel_coding = drm_dp_channel_coding_supported(dpcd);
+
+ if (drm_dp_alternate_scrambler_reset_cap(dpcd)) {
+ link->caps.alternate_scrambler_reset = true;
+
+ err = drm_dp_dpcd_readb(aux, DP_EDP_DPCD_REV, &value);
+ if (err < 0)
+ return err;
+
+ if (value >= ARRAY_SIZE(drm_dp_edp_revisions))
+ DRM_ERROR("unsupported eDP version: %02x\n", value);
+ else
+ link->edp = drm_dp_edp_revisions[value];
+ }
+
+ /*
+ * The DPCD stores the AUX read interval in units of 4 ms. There are
+ * two special cases:
+ *
+ * 1) if the TRAINING_AUX_RD_INTERVAL field is 0, the clock recovery
+ * and channel equalization should use 100 us or 400 us AUX read
+ * intervals, respectively
+ *
+ * 2) for DP v1.4 and above, clock recovery should always use 100 us
+ * AUX read intervals
+ */
+ rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_TRAINING_AUX_RD_MASK;
+
+ if (rd_interval > 4) {
+ DRM_DEBUG_KMS("AUX interval %u out of range (max. 4)\n",
+ rd_interval);
+ rd_interval = 4;
+ }
+
+ rd_interval *= 4 * USEC_PER_MSEC;
+
+ if (rd_interval == 0 || link->revision >= DP_DPCD_REV_14)
+ link->aux_rd_interval.cr = 100;
+
+ if (rd_interval == 0)
+ link->aux_rd_interval.ce = 400;
+
+ link->rate = link->max_rate;
+ link->lanes = link->max_lanes;
+
+ /* Parse SUPPORTED_LINK_RATES from eDP 1.4 */
+ if (link->edp >= 0x14) {
+ u8 supported_rates[DP_MAX_SUPPORTED_RATES * 2];
+ unsigned int i;
+ u16 rate;
+
+ err = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES,
+ supported_rates,
+ sizeof(supported_rates));
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++) {
+ rate = supported_rates[i * 2 + 1] << 8 |
+ supported_rates[i * 2 + 0];
+
+ drm_dp_link_add_rate(link, rate * 200);
+ }
+ }
+
+ return 0;
+}
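The SUPPORTED_LINK_RATES entries parsed at the end of the probe above are little-endian 16-bit values in units of 200 kHz, which is why the code multiplies by 200. A worked decoding of one raw DPCD entry:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Raw DPCD byte pair 0x2a, 0x03: little-endian -> 810 */
	uint8_t raw[2] = { 0x2a, 0x03 };
	uint16_t rate = raw[1] << 8 | raw[0];
	unsigned int khz = rate * 200u;

	/* 810 * 200 kHz = 162000 kHz, i.e. the RBR link rate */
	printf("entry %u -> %u kHz\n", rate, khz);
	return 0;
}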
+
+/**
+ * drm_dp_link_power_up() - power up a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must exit the
+ * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
+ * Control Field" (register 0x600).
+ */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_power_down() - power down a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_configure() - configure a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 values[2], value;
+ int err;
+
+ if (link->ops && link->ops->configure) {
+ err = link->ops->configure(link);
+ if (err < 0) {
+ DRM_ERROR("failed to configure DP link: %d\n", err);
+ return err;
+ }
+ }
+
+ values[0] = drm_dp_link_rate_to_bw_code(link->rate);
+ values[1] = link->lanes;
+
+ if (link->caps.enhanced_framing)
+ values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
+ if (err < 0)
+ return err;
+
+ if (link->caps.channel_coding)
+ value = DP_SET_ANSI_8B10B;
+ else
+ value = 0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, value);
+ if (err < 0)
+ return err;
+
+ if (link->caps.alternate_scrambler_reset) {
+ err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET,
+ DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_choose() - choose the lowest possible configuration for a mode
+ * @link: DRM DP link object
+ * @mode: DRM display mode
+ * @info: DRM display information
+ *
+ * According to the eDP specification, a source should select a configuration
+ * with the lowest number of lanes and the lowest possible link rate that can
+ * match the bitrate requirements of a video mode. However, it must ensure not
+ * to exceed the capabilities of the sink.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_choose(struct drm_dp_link *link,
+ const struct drm_display_mode *mode,
+ const struct drm_display_info *info)
+{
+ /* available link symbol clock rates */
+ static const unsigned int rates[3] = { 162000, 270000, 540000 };
+ /* available number of lanes */
+ static const unsigned int lanes[3] = { 1, 2, 4 };
+ unsigned long requirement, capacity;
+ unsigned int rate = link->max_rate;
+ unsigned int i, j;
+
+ /* bandwidth requirement */
+ requirement = mode->clock * info->bpc * 3;
+
+ for (i = 0; i < ARRAY_SIZE(lanes) && lanes[i] <= link->max_lanes; i++) {
+ for (j = 0; j < ARRAY_SIZE(rates) && rates[j] <= rate; j++) {
+ /*
+ * Capacity for this combination of lanes and rate,
+ * factoring in the ANSI 8B/10B encoding.
+ *
+ * Link rates in the DRM DP helpers are really link
+ * symbol frequencies, so a tenth of the actual rate
+ * of the link.
+ */
+ capacity = lanes[i] * (rates[j] * 10) * 8 / 10;
+
+ if (capacity >= requirement) {
+ DRM_DEBUG_KMS("using %u lanes at %u kHz (%lu/%lu kbps)\n",
+ lanes[i], rates[j], requirement,
+ capacity);
+ link->lanes = lanes[i];
+ link->rate = rates[j];
+ return 0;
+ }
+ }
+ }
+
+ return -ERANGE;
+}
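+
+/*
+ * Worked example for the loop above (numbers are illustrative only): a
+ * 1080p60 mode with a 148500 kHz pixel clock at 8 bpc needs
+ *
+ *	requirement = 148500 * 8 * 3 = 3564000 kbps
+ *
+ * Against a sink reporting max_lanes = 4 and max_rate = 540000, the scan
+ * proceeds:
+ *
+ *	1 lane @ 162000: 1 * 1620000 * 8 / 10 = 1296000 kbps (too low)
+ *	1 lane @ 270000: 1 * 2700000 * 8 / 10 = 2160000 kbps (too low)
+ *	1 lane @ 540000: 1 * 5400000 * 8 / 10 = 4320000 kbps (fits)
+ *
+ * so the helper settles on a single lane at the highest rate: because the
+ * lane count is the outer loop, fewer lanes are preferred over a lower
+ * link rate.
+ */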
+
+/**
+ * DOC: Link training
+ *
+ * These functions contain common logic and helpers to implement DisplayPort
+ * link training.
+ */
+
+/**
+ * drm_dp_link_train_init() - initialize DisplayPort link training state
+ * @train: DisplayPort link training state
+ */
+void drm_dp_link_train_init(struct drm_dp_link_train *train)
+{
+ struct drm_dp_link_train_set *request = &train->request;
+ struct drm_dp_link_train_set *adjust = &train->adjust;
+ unsigned int i;
+
+ for (i = 0; i < 4; i++) {
+ request->voltage_swing[i] = 0;
+ adjust->voltage_swing[i] = 0;
+
+ request->pre_emphasis[i] = 0;
+ adjust->pre_emphasis[i] = 0;
+
+ request->post_cursor[i] = 0;
+ adjust->post_cursor[i] = 0;
+ }
+
+ train->pattern = DP_TRAINING_PATTERN_DISABLE;
+ train->clock_recovered = false;
+ train->channel_equalized = false;
+}
+
+static bool drm_dp_link_train_valid(const struct drm_dp_link_train *train)
+{
+ return train->clock_recovered && train->channel_equalized;
+}
+
+static int drm_dp_link_apply_training(struct drm_dp_link *link)
+{
+ struct drm_dp_link_train_set *request = &link->train.request;
+ unsigned int lanes = link->lanes, *vs, *pe, *pc, i;
+ struct drm_dp_aux *aux = link->aux;
+ u8 values[4], pattern = 0;
+ int err;
+
+ err = link->ops->apply_training(link);
+ if (err < 0) {
+ DRM_ERROR("failed to apply link training: %d\n", err);
+ return err;
+ }
+
+ vs = request->voltage_swing;
+ pe = request->pre_emphasis;
+ pc = request->post_cursor;
+
+ /* write currently selected voltage-swing and pre-emphasis levels */
+ for (i = 0; i < lanes; i++)
+ values[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL(vs[i]) |
+ DP_TRAIN_PRE_EMPHASIS_LEVEL(pe[i]);
+
+ err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values, lanes);
+ if (err < 0) {
+ DRM_ERROR("failed to set training parameters: %d\n", err);
+ return err;
+ }
+
+ /* write currently selected post-cursor level (if supported) */
+ if (link->revision >= 0x12 && link->rate == 540000) {
+ values[0] = values[1] = 0;
+
+ for (i = 0; i < lanes; i++)
+ values[i / 2] |= DP_LANE_POST_CURSOR(i, pc[i]);
+
+ err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_1_SET2, values,
+ DIV_ROUND_UP(lanes, 2));
+ if (err < 0) {
+ DRM_ERROR("failed to set post-cursor: %d\n", err);
+ return err;
+ }
+ }
+
+ /* write link pattern */
+ if (link->train.pattern != DP_TRAINING_PATTERN_DISABLE)
+ pattern |= DP_LINK_SCRAMBLING_DISABLE;
+
+ pattern |= link->train.pattern;
+
+ err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern);
+ if (err < 0) {
+ DRM_ERROR("failed to set training pattern: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void drm_dp_link_train_wait(struct drm_dp_link *link)
+{
+ unsigned long min = 0;
+
+ switch (link->train.pattern) {
+ case DP_TRAINING_PATTERN_1:
+ min = link->aux_rd_interval.cr;
+ break;
+
+ case DP_TRAINING_PATTERN_2:
+ case DP_TRAINING_PATTERN_3:
+ min = link->aux_rd_interval.ce;
+ break;
+
+ default:
+ break;
+ }
+
+ if (min > 0)
+ usleep_range(min, 2 * min);
+}
+
+static void drm_dp_link_get_adjustments(struct drm_dp_link *link,
+ u8 status[DP_LINK_STATUS_SIZE])
+{
+ struct drm_dp_link_train_set *adjust = &link->train.adjust;
+ unsigned int i;
+
+ for (i = 0; i < link->lanes; i++) {
+ adjust->voltage_swing[i] =
+ drm_dp_get_adjust_request_voltage(status, i) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+
+ adjust->pre_emphasis[i] =
+ drm_dp_get_adjust_request_pre_emphasis(status, i) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ adjust->post_cursor[i] =
+ drm_dp_get_adjust_request_post_cursor(status, i);
+ }
+}
+
+static void drm_dp_link_train_adjust(struct drm_dp_link_train *train)
+{
+ struct drm_dp_link_train_set *request = &train->request;
+ struct drm_dp_link_train_set *adjust = &train->adjust;
+ unsigned int i;
+
+ for (i = 0; i < 4; i++)
+ if (request->voltage_swing[i] != adjust->voltage_swing[i])
+ request->voltage_swing[i] = adjust->voltage_swing[i];
+
+ for (i = 0; i < 4; i++)
+ if (request->pre_emphasis[i] != adjust->pre_emphasis[i])
+ request->pre_emphasis[i] = adjust->pre_emphasis[i];
+
+ for (i = 0; i < 4; i++)
+ if (request->post_cursor[i] != adjust->post_cursor[i])
+ request->post_cursor[i] = adjust->post_cursor[i];
+}
+
+static int drm_dp_link_recover_clock(struct drm_dp_link *link)
+{
+ u8 status[DP_LINK_STATUS_SIZE];
+ int err;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ return err;
+
+ drm_dp_link_train_wait(link);
+
+ err = drm_dp_dpcd_read_link_status(link->aux, status);
+ if (err < 0) {
+ DRM_ERROR("failed to read link status: %d\n", err);
+ return err;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes))
+ drm_dp_link_get_adjustments(link, status);
+ else
+ link->train.clock_recovered = true;
+
+ return 0;
+}
+
+static int drm_dp_link_clock_recovery(struct drm_dp_link *link)
+{
+ unsigned int repeat;
+ int err;
+
+ /* start clock recovery using training pattern 1 */
+ link->train.pattern = DP_TRAINING_PATTERN_1;
+
+ for (repeat = 1; repeat < 5; repeat++) {
+ err = drm_dp_link_recover_clock(link);
+ if (err < 0) {
+ DRM_ERROR("failed to recover clock: %d\n", err);
+ return err;
+ }
+
+ if (link->train.clock_recovered)
+ break;
+
+ drm_dp_link_train_adjust(&link->train);
+ }
+
+ return 0;
+}
+
+static int drm_dp_link_equalize_channel(struct drm_dp_link *link)
+{
+ struct drm_dp_aux *aux = link->aux;
+ u8 status[DP_LINK_STATUS_SIZE];
+ int err;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ return err;
+
+ drm_dp_link_train_wait(link);
+
+ err = drm_dp_dpcd_read_link_status(aux, status);
+ if (err < 0) {
+ DRM_ERROR("failed to read link status: %d\n", err);
+ return err;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
+ DRM_ERROR("clock recovery lost while equalizing channel\n");
+ link->train.clock_recovered = false;
+ return 0;
+ }
+
+ if (!drm_dp_channel_eq_ok(status, link->lanes))
+ drm_dp_link_get_adjustments(link, status);
+ else
+ link->train.channel_equalized = true;
+
+ return 0;
+}
+
+static int drm_dp_link_channel_equalization(struct drm_dp_link *link)
+{
+ unsigned int repeat;
+ int err;
+
+ /* start channel equalization using pattern 2 or 3 */
+ if (link->caps.tps3_supported)
+ link->train.pattern = DP_TRAINING_PATTERN_3;
+ else
+ link->train.pattern = DP_TRAINING_PATTERN_2;
+
+ for (repeat = 1; repeat < 5; repeat++) {
+ err = drm_dp_link_equalize_channel(link);
+ if (err < 0) {
+ DRM_ERROR("failed to equalize channel: %d\n", err);
+ return err;
+ }
+
+ if (link->train.channel_equalized)
+ break;
+
+ drm_dp_link_train_adjust(&link->train);
+ }
+
+ return 0;
+}
+
+static int drm_dp_link_downgrade(struct drm_dp_link *link)
+{
+ switch (link->rate) {
+ case 162000:
+ return -EINVAL;
+
+ case 270000:
+ link->rate = 162000;
+ break;
+
+	case 540000:
+		link->rate = 270000;
+		break;
+ }
+
+ return 0;
+}
+
+static void drm_dp_link_train_disable(struct drm_dp_link *link)
+{
+ int err;
+
+ link->train.pattern = DP_TRAINING_PATTERN_DISABLE;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ DRM_ERROR("failed to disable link training: %d\n", err);
+}
+
+static int drm_dp_link_train_full(struct drm_dp_link *link)
+{
+ int err;
+
+retry:
+ DRM_DEBUG_KMS("full-training link: %u lane%s at %u MHz\n",
+ link->lanes, (link->lanes > 1) ? "s" : "",
+ link->rate / 100);
+
+ err = drm_dp_link_configure(link->aux, link);
+ if (err < 0) {
+ DRM_ERROR("failed to configure DP link: %d\n", err);
+ return err;
+ }
+
+ err = drm_dp_link_clock_recovery(link);
+ if (err < 0) {
+ DRM_ERROR("clock recovery failed: %d\n", err);
+ goto out;
+ }
+
+ if (!link->train.clock_recovered) {
+ DRM_ERROR("clock recovery failed, downgrading link\n");
+
+ err = drm_dp_link_downgrade(link);
+ if (err < 0)
+ goto out;
+
+ goto retry;
+ }
+
+ DRM_DEBUG_KMS("clock recovery succeeded\n");
+
+ err = drm_dp_link_channel_equalization(link);
+ if (err < 0) {
+ DRM_ERROR("channel equalization failed: %d\n", err);
+ goto out;
+ }
+
+ if (!link->train.channel_equalized) {
+ DRM_ERROR("channel equalization failed, downgrading link\n");
+
+ err = drm_dp_link_downgrade(link);
+ if (err < 0)
+ goto out;
+
+ goto retry;
+ }
+
+ DRM_DEBUG_KMS("channel equalization succeeded\n");
+
+out:
+ drm_dp_link_train_disable(link);
+ return err;
+}
+
+static int drm_dp_link_train_fast(struct drm_dp_link *link)
+{
+ u8 status[DP_LINK_STATUS_SIZE];
+ int err;
+
+ DRM_DEBUG_KMS("fast-training link: %u lane%s at %u MHz\n",
+ link->lanes, (link->lanes > 1) ? "s" : "",
+ link->rate / 100);
+
+ err = drm_dp_link_configure(link->aux, link);
+ if (err < 0) {
+ DRM_ERROR("failed to configure DP link: %d\n", err);
+ return err;
+ }
+
+ /* transmit training pattern 1 for 500 microseconds */
+ link->train.pattern = DP_TRAINING_PATTERN_1;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ goto out;
+
+ usleep_range(500, 1000);
+
+ /* transmit training pattern 2 or 3 for 500 microseconds */
+ if (link->caps.tps3_supported)
+ link->train.pattern = DP_TRAINING_PATTERN_3;
+ else
+ link->train.pattern = DP_TRAINING_PATTERN_2;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ goto out;
+
+ usleep_range(500, 1000);
+
+ err = drm_dp_dpcd_read_link_status(link->aux, status);
+ if (err < 0) {
+ DRM_ERROR("failed to read link status: %d\n", err);
+ goto out;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
+ DRM_ERROR("clock recovery failed\n");
+ err = -EIO;
+ }
+
+ if (!drm_dp_channel_eq_ok(status, link->lanes)) {
+ DRM_ERROR("channel equalization failed\n");
+ err = -EIO;
+ }
+
+out:
+ drm_dp_link_train_disable(link);
+ return err;
+}
+
+/**
+ * drm_dp_link_train() - perform DisplayPort link training
+ * @link: a DP link object
+ *
+ * Uses the context stored in the DP link object to perform link training. It
+ * is expected that drivers will call drm_dp_link_probe() to obtain the link
+ * capabilities before performing link training.
+ *
+ * If the sink supports fast link training (no AUX CH handshake) and valid
+ * training settings are available, this function will try to perform fast
+ * link training and fall back to full link training on failure.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_train(struct drm_dp_link *link)
+{
+ int err;
+
+ drm_dp_link_train_init(&link->train);
+
+ if (link->caps.fast_training) {
+ if (drm_dp_link_train_valid(&link->train)) {
+ err = drm_dp_link_train_fast(link);
+ if (err < 0)
+ DRM_ERROR("fast link training failed: %d\n",
+ err);
+ else
+ return 0;
+ } else {
+ DRM_DEBUG_KMS("training parameters not available\n");
+ }
+ } else {
+ DRM_DEBUG_KMS("fast link training not supported\n");
+ }
+
+ err = drm_dp_link_train_full(link);
+ if (err < 0)
+ DRM_ERROR("full link training failed: %d\n", err);
+
+ return err;
+}
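+
+/*
+ * Typical call sequence for a (hypothetical) driver using these helpers,
+ * assuming the driver has set up @link beforehand (notably @aux and @ops;
+ * error handling and the surrounding modeset plumbing are elided):
+ *
+ *	err = drm_dp_link_probe(aux, link);
+ *	if (err < 0)
+ *		return err;
+ *
+ *	err = drm_dp_link_power_up(aux, link);
+ *	if (err < 0)
+ *		return err;
+ *
+ *	err = drm_dp_link_choose(link, mode, info);
+ *	if (err < 0)
+ *		return err;
+ *
+ *	err = drm_dp_link_train(link);
+ *	if (err < 0)
+ *		return err;
+ *
+ * drm_dp_link_probe() reads the sink capabilities, drm_dp_link_power_up()
+ * moves the sink from D3 to D0, drm_dp_link_choose() picks the cheapest
+ * lane count and rate for the mode, and drm_dp_link_train() performs fast
+ * or full training as appropriate. Full training configures the link
+ * itself, so no separate drm_dp_link_configure() call is needed here.
+ */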
diff --git a/drivers/gpu/drm/tegra/dp.h b/drivers/gpu/drm/tegra/dp.h
new file mode 100644
index 000000000000..cb12ed0c54e7
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dp.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2013-2019 NVIDIA Corporation.
+ * Copyright (C) 2015 Rob Clark
+ */
+
+#ifndef DRM_TEGRA_DP_H
+#define DRM_TEGRA_DP_H 1
+
+#include <linux/types.h>
+
+struct drm_display_info;
+struct drm_display_mode;
+struct drm_dp_aux;
+struct drm_dp_link;
+
+/**
+ * struct drm_dp_link_caps - DP link capabilities
+ */
+struct drm_dp_link_caps {
+ /**
+ * @enhanced_framing:
+ *
+ * enhanced framing capability (mandatory as of DP 1.2)
+ */
+ bool enhanced_framing;
+
+ /**
+	 * @tps3_supported:
+ *
+ * training pattern sequence 3 supported for equalization
+ */
+ bool tps3_supported;
+
+ /**
+ * @fast_training:
+ *
+ * AUX CH handshake not required for link training
+ */
+ bool fast_training;
+
+ /**
+ * @channel_coding:
+ *
+ * ANSI 8B/10B channel coding capability
+ */
+ bool channel_coding;
+
+ /**
+ * @alternate_scrambler_reset:
+ *
+ * eDP alternate scrambler reset capability
+ */
+ bool alternate_scrambler_reset;
+};
+
+void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest,
+ const struct drm_dp_link_caps *src);
+
+/**
+ * struct drm_dp_link_ops - DP link operations
+ */
+struct drm_dp_link_ops {
+	/**
+	 * @apply_training:
+	 *
+	 * Callback used to apply the currently requested voltage-swing,
+	 * pre-emphasis, post-cursor and pattern settings to the source
+	 * hardware. Mandatory: the training helpers call it unconditionally.
+	 */
+	int (*apply_training)(struct drm_dp_link *link);
+
+	/**
+	 * @configure:
+	 *
+	 * Optional callback invoked at the start of drm_dp_link_configure()
+	 * so that the source can program its hardware for the selected link
+	 * rate and lane count.
+	 */
+	int (*configure)(struct drm_dp_link *link);
+};
+
+#define DP_TRAIN_VOLTAGE_SWING_LEVEL(x) ((x) << 0)
+#define DP_TRAIN_PRE_EMPHASIS_LEVEL(x) ((x) << 3)
+#define DP_LANE_POST_CURSOR(i, x) (((x) & 0x3) << (((i) & 1) << 2))
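+
+/*
+ * DP_LANE_POST_CURSOR() packs two lanes per byte to match the layout of
+ * the DP_TRAINING_LANE0_1_SET2 register: lanes 0 and 2 land in bits 1:0
+ * of their byte, lanes 1 and 3 in bits 5:4. This is why
+ * drm_dp_link_apply_training() accumulates the levels with
+ *
+ *	values[i / 2] |= DP_LANE_POST_CURSOR(i, pc[i]);
+ */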
+
+/**
+ * struct drm_dp_link_train_set - link training settings
+ * @voltage_swing: per-lane voltage swing
+ * @pre_emphasis: per-lane pre-emphasis
+ * @post_cursor: per-lane post-cursor
+ */
+struct drm_dp_link_train_set {
+ unsigned int voltage_swing[4];
+ unsigned int pre_emphasis[4];
+ unsigned int post_cursor[4];
+};
+
+/**
+ * struct drm_dp_link_train - link training state information
+ * @request: currently requested settings
+ * @adjust: adjustments requested by sink
+ * @pattern: currently requested training pattern
+ * @clock_recovered: flag to track if clock recovery has completed
+ * @channel_equalized: flag to track if channel equalization has completed
+ */
+struct drm_dp_link_train {
+ struct drm_dp_link_train_set request;
+ struct drm_dp_link_train_set adjust;
+
+ unsigned int pattern;
+
+ bool clock_recovered;
+ bool channel_equalized;
+};
+
+/**
+ * struct drm_dp_link - DP link capabilities and configuration
+ * @revision: DP specification revision supported on the link
+ * @max_rate: maximum clock rate supported on the link
+ * @max_lanes: maximum number of lanes supported on the link
+ * @caps: capabilities supported on the link (see &drm_dp_link_caps)
+ * @aux_rd_interval: AUX read interval to use for training (in microseconds)
+ * @edp: eDP revision (0x11: eDP 1.1, 0x12: eDP 1.2, ...)
+ * @rate: currently configured link rate
+ * @lanes: currently configured number of lanes
+ * @rates: additional supported link rates in kHz (eDP 1.4)
+ * @num_rates: number of additional supported link rates (eDP 1.4)
+ */
+struct drm_dp_link {
+ unsigned char revision;
+ unsigned int max_rate;
+ unsigned int max_lanes;
+
+ struct drm_dp_link_caps caps;
+
+ /**
+ * @cr: clock recovery read interval
+ * @ce: channel equalization read interval
+ */
+ struct {
+ unsigned int cr;
+ unsigned int ce;
+ } aux_rd_interval;
+
+ unsigned char edp;
+
+ unsigned int rate;
+ unsigned int lanes;
+
+ unsigned long rates[DP_MAX_SUPPORTED_RATES];
+ unsigned int num_rates;
+
+ /**
+ * @ops: DP link operations
+ */
+ const struct drm_dp_link_ops *ops;
+
+ /**
+ * @aux: DP AUX channel
+ */
+ struct drm_dp_aux *aux;
+
+ /**
+ * @train: DP link training state
+ */
+ struct drm_dp_link_train train;
+};
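+
+/*
+ * Hypothetical hookup sketch (names are illustrative): a driver embeds
+ * struct drm_dp_link and wires up @aux and @ops before calling the probe
+ * and training helpers:
+ *
+ *	static const struct drm_dp_link_ops example_link_ops = {
+ *		.apply_training = example_apply_training,
+ *		.configure = example_configure,
+ *	};
+ *
+ *	link->aux = aux;
+ *	link->ops = &example_link_ops;
+ */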
+
+int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate);
+int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate);
+void drm_dp_link_update_rates(struct drm_dp_link *link);
+
+int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_choose(struct drm_dp_link *link,
+ const struct drm_display_mode *mode,
+ const struct drm_display_info *info);
+
+void drm_dp_link_train_init(struct drm_dp_link_train *train);
+int drm_dp_link_train(struct drm_dp_link *link);
+
+#endif
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index a0f6f9b0d258..622cdf1ad246 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>
@@ -22,6 +23,7 @@
#include <drm/drm_dp_helper.h>
#include <drm/drm_panel.h>
+#include "dp.h"
#include "dpaux.h"
#include "drm.h"
#include "trace.h"
@@ -29,10 +31,18 @@
static DEFINE_MUTEX(dpaux_lock);
static LIST_HEAD(dpaux_list);
+struct tegra_dpaux_soc {
+ unsigned int cmh;
+ unsigned int drvz;
+ unsigned int drvi;
+};
+
struct tegra_dpaux {
struct drm_dp_aux aux;
struct device *dev;
+ const struct tegra_dpaux_soc *soc;
+
void __iomem *regs;
int irq;
@@ -120,6 +130,7 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
struct tegra_dpaux *dpaux = to_dpaux(aux);
unsigned long status;
ssize_t ret = 0;
+ u8 reply = 0;
u32 value;
/* Tegra has 4x4 byte DP AUX transmit and receive FIFOs. */
@@ -214,23 +225,23 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
switch ((value & DPAUX_DP_AUXSTAT_REPLY_TYPE_MASK) >> 16) {
case 0x00:
- msg->reply = DP_AUX_NATIVE_REPLY_ACK;
+ reply = DP_AUX_NATIVE_REPLY_ACK;
break;
case 0x01:
- msg->reply = DP_AUX_NATIVE_REPLY_NACK;
+ reply = DP_AUX_NATIVE_REPLY_NACK;
break;
case 0x02:
- msg->reply = DP_AUX_NATIVE_REPLY_DEFER;
+ reply = DP_AUX_NATIVE_REPLY_DEFER;
break;
case 0x04:
- msg->reply = DP_AUX_I2C_REPLY_NACK;
+ reply = DP_AUX_I2C_REPLY_NACK;
break;
case 0x08:
- msg->reply = DP_AUX_I2C_REPLY_DEFER;
+ reply = DP_AUX_I2C_REPLY_DEFER;
break;
}
@@ -238,14 +249,24 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
if (msg->request & DP_AUX_I2C_READ) {
size_t count = value & DPAUX_DP_AUXSTAT_REPLY_MASK;
- if (WARN_ON(count != msg->size))
- count = min_t(size_t, count, msg->size);
+ /*
+ * There might be a smarter way to do this, but since
+ * the DP helpers will already retry transactions for
+ * an -EBUSY return value, simply reuse that instead.
+ */
+ if (count != msg->size) {
+ ret = -EBUSY;
+ goto out;
+ }
tegra_dpaux_read_fifo(dpaux, msg->buffer, count);
ret = count;
}
}
+ msg->reply = reply;
+
+out:
return ret;
}
@@ -310,9 +331,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
switch (function) {
case DPAUX_PADCTL_FUNC_AUX:
- value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
- DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
- DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
+ value = DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) |
DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
DPAUX_HYBRID_PADCTL_MODE_AUX;
break;
@@ -320,9 +341,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
case DPAUX_PADCTL_FUNC_I2C:
value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
- DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
- DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
- DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
+ DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) |
DPAUX_HYBRID_PADCTL_MODE_I2C;
break;
@@ -436,6 +457,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
if (!dpaux)
return -ENOMEM;
+ dpaux->soc = of_device_get_match_data(&pdev->dev);
INIT_WORK(&dpaux->work, tegra_dpaux_hotplug);
init_completion(&dpaux->complete);
INIT_LIST_HEAD(&dpaux->list);
@@ -493,6 +515,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
return PTR_ERR(dpaux->vdd);
}
+
+ dpaux->vdd = NULL;
}
platform_set_drvdata(pdev, dpaux);
@@ -641,11 +665,29 @@ static const struct dev_pm_ops tegra_dpaux_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_dpaux_suspend, tegra_dpaux_resume, NULL)
};
+static const struct tegra_dpaux_soc tegra124_dpaux_soc = {
+ .cmh = 0x02,
+ .drvz = 0x04,
+ .drvi = 0x18,
+};
+
+static const struct tegra_dpaux_soc tegra210_dpaux_soc = {
+ .cmh = 0x02,
+ .drvz = 0x04,
+ .drvi = 0x30,
+};
+
+static const struct tegra_dpaux_soc tegra194_dpaux_soc = {
+ .cmh = 0x02,
+ .drvz = 0x04,
+ .drvi = 0x2c,
+};
+
static const struct of_device_id tegra_dpaux_of_match[] = {
- { .compatible = "nvidia,tegra194-dpaux", },
- { .compatible = "nvidia,tegra186-dpaux", },
- { .compatible = "nvidia,tegra210-dpaux", },
- { .compatible = "nvidia,tegra124-dpaux", },
+ { .compatible = "nvidia,tegra194-dpaux", .data = &tegra194_dpaux_soc },
+ { .compatible = "nvidia,tegra186-dpaux", .data = &tegra210_dpaux_soc },
+ { .compatible = "nvidia,tegra210-dpaux", .data = &tegra210_dpaux_soc },
+ { .compatible = "nvidia,tegra124-dpaux", .data = &tegra124_dpaux_soc },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_dpaux_of_match);
@@ -686,25 +728,32 @@ int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output)
output->connector.polled = DRM_CONNECTOR_POLL_HPD;
dpaux->output = output;
- err = regulator_enable(dpaux->vdd);
- if (err < 0)
- return err;
+ if (output->panel) {
+ enum drm_connector_status status;
- timeout = jiffies + msecs_to_jiffies(250);
+ if (dpaux->vdd) {
+ err = regulator_enable(dpaux->vdd);
+ if (err < 0)
+ return err;
+ }
- while (time_before(jiffies, timeout)) {
- enum drm_connector_status status;
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ status = drm_dp_aux_detect(aux);
+
+ if (status == connector_status_connected)
+ break;
- status = drm_dp_aux_detect(aux);
- if (status == connector_status_connected) {
- enable_irq(dpaux->irq);
- return 0;
+ usleep_range(1000, 2000);
}
- usleep_range(1000, 2000);
+ if (status != connector_status_connected)
+ return -ETIMEDOUT;
}
- return -ETIMEDOUT;
+ enable_irq(dpaux->irq);
+ return 0;
}
int drm_dp_aux_detach(struct drm_dp_aux *aux)
@@ -715,25 +764,33 @@ int drm_dp_aux_detach(struct drm_dp_aux *aux)
disable_irq(dpaux->irq);
- err = regulator_disable(dpaux->vdd);
- if (err < 0)
- return err;
+ if (dpaux->output->panel) {
+ enum drm_connector_status status;
- timeout = jiffies + msecs_to_jiffies(250);
+ if (dpaux->vdd) {
+ err = regulator_disable(dpaux->vdd);
+ if (err < 0)
+ return err;
+ }
- while (time_before(jiffies, timeout)) {
- enum drm_connector_status status;
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ status = drm_dp_aux_detect(aux);
+
+ if (status == connector_status_disconnected)
+ break;
- status = drm_dp_aux_detect(aux);
- if (status == connector_status_disconnected) {
- dpaux->output = NULL;
- return 0;
+ usleep_range(1000, 2000);
}
- usleep_range(1000, 2000);
+ if (status != connector_status_disconnected)
+ return -ETIMEDOUT;
+
+ dpaux->output = NULL;
}
- return -ETIMEDOUT;
+ return 0;
}
enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux)
@@ -764,72 +821,3 @@ int drm_dp_aux_disable(struct drm_dp_aux *aux)
return 0;
}
-
-int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding)
-{
- int err;
-
- err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
- encoding);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link,
- u8 pattern)
-{
- u8 tp = pattern & DP_TRAINING_PATTERN_MASK;
- u8 status[DP_LINK_STATUS_SIZE], values[4];
- unsigned int i;
- int err;
-
- err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern);
- if (err < 0)
- return err;
-
- if (tp == DP_TRAINING_PATTERN_DISABLE)
- return 0;
-
- for (i = 0; i < link->num_lanes; i++)
- values[i] = DP_TRAIN_MAX_PRE_EMPHASIS_REACHED |
- DP_TRAIN_PRE_EMPH_LEVEL_0 |
- DP_TRAIN_MAX_SWING_REACHED |
- DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
-
- err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values,
- link->num_lanes);
- if (err < 0)
- return err;
-
- usleep_range(500, 1000);
-
- err = drm_dp_dpcd_read_link_status(aux, status);
- if (err < 0)
- return err;
-
- switch (tp) {
- case DP_TRAINING_PATTERN_1:
- if (!drm_dp_clock_recovery_ok(status, link->num_lanes))
- return -EAGAIN;
-
- break;
-
- case DP_TRAINING_PATTERN_2:
- if (!drm_dp_channel_eq_ok(status, link->num_lanes))
- return -EAGAIN;
-
- break;
-
- default:
- dev_err(aux->dev, "unsupported training pattern %u\n", tp);
- return -EINVAL;
- }
-
- err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, 0);
- if (err < 0)
- return err;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 6fb7d74ff553..56e5e7a5c108 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -20,10 +20,6 @@
#include <drm/drm_prime.h>
#include <drm/drm_vblank.h>
-#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
-#include <asm/dma-iommu.h>
-#endif
-
#include "drm.h"
#include "gem.h"
@@ -86,168 +82,6 @@ tegra_drm_mode_config_helpers = {
.atomic_commit_tail = tegra_atomic_commit_tail,
};
-static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
-{
- struct host1x_device *device = to_host1x_device(drm->dev);
- struct tegra_drm *tegra;
- int err;
-
- tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
- if (!tegra)
- return -ENOMEM;
-
- if (iommu_present(&platform_bus_type)) {
- tegra->domain = iommu_domain_alloc(&platform_bus_type);
- if (!tegra->domain) {
- err = -ENOMEM;
- goto free;
- }
-
- err = iova_cache_get();
- if (err < 0)
- goto domain;
- }
-
- mutex_init(&tegra->clients_lock);
- INIT_LIST_HEAD(&tegra->clients);
-
- drm->dev_private = tegra;
- tegra->drm = drm;
-
- drm_mode_config_init(drm);
-
- drm->mode_config.min_width = 0;
- drm->mode_config.min_height = 0;
-
- drm->mode_config.max_width = 4096;
- drm->mode_config.max_height = 4096;
-
- drm->mode_config.allow_fb_modifiers = true;
-
- drm->mode_config.normalize_zpos = true;
-
- drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
- drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
-
- err = tegra_drm_fb_prepare(drm);
- if (err < 0)
- goto config;
-
- drm_kms_helper_poll_init(drm);
-
- err = host1x_device_init(device);
- if (err < 0)
- goto fbdev;
-
- if (tegra->domain) {
- u64 carveout_start, carveout_end, gem_start, gem_end;
- u64 dma_mask = dma_get_mask(&device->dev);
- dma_addr_t start, end;
- unsigned long order;
-
- start = tegra->domain->geometry.aperture_start & dma_mask;
- end = tegra->domain->geometry.aperture_end & dma_mask;
-
- gem_start = start;
- gem_end = end - CARVEOUT_SZ;
- carveout_start = gem_end + 1;
- carveout_end = end;
-
- order = __ffs(tegra->domain->pgsize_bitmap);
- init_iova_domain(&tegra->carveout.domain, 1UL << order,
- carveout_start >> order);
-
- tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
- tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
-
- drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
- mutex_init(&tegra->mm_lock);
-
- DRM_DEBUG("IOMMU apertures:\n");
- DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end);
- DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start,
- carveout_end);
- }
-
- if (tegra->hub) {
- err = tegra_display_hub_prepare(tegra->hub);
- if (err < 0)
- goto device;
- }
-
- /*
- * We don't use the drm_irq_install() helpers provided by the DRM
- * core, so we need to set this manually in order to allow the
- * DRM_IOCTL_WAIT_VBLANK to operate correctly.
- */
- drm->irq_enabled = true;
-
- /* syncpoints are used for full 32-bit hardware VBLANK counters */
- drm->max_vblank_count = 0xffffffff;
-
- err = drm_vblank_init(drm, drm->mode_config.num_crtc);
- if (err < 0)
- goto hub;
-
- drm_mode_config_reset(drm);
-
- err = tegra_drm_fb_init(drm);
- if (err < 0)
- goto hub;
-
- return 0;
-
-hub:
- if (tegra->hub)
- tegra_display_hub_cleanup(tegra->hub);
-device:
- host1x_device_exit(device);
-fbdev:
- drm_kms_helper_poll_fini(drm);
- tegra_drm_fb_free(drm);
-config:
- drm_mode_config_cleanup(drm);
-
- if (tegra->domain) {
- mutex_destroy(&tegra->mm_lock);
- drm_mm_takedown(&tegra->mm);
- put_iova_domain(&tegra->carveout.domain);
- iova_cache_put();
- }
-domain:
- if (tegra->domain)
- iommu_domain_free(tegra->domain);
-free:
- kfree(tegra);
- return err;
-}
-
-static void tegra_drm_unload(struct drm_device *drm)
-{
- struct host1x_device *device = to_host1x_device(drm->dev);
- struct tegra_drm *tegra = drm->dev_private;
- int err;
-
- drm_kms_helper_poll_fini(drm);
- tegra_drm_fb_exit(drm);
- drm_atomic_helper_shutdown(drm);
- drm_mode_config_cleanup(drm);
-
- err = host1x_device_exit(device);
- if (err < 0)
- return;
-
- if (tegra->domain) {
- mutex_destroy(&tegra->mm_lock);
- drm_mm_takedown(&tegra->mm);
- put_iova_domain(&tegra->carveout.domain);
- iova_cache_put();
- iommu_domain_free(tegra->domain);
- }
-
- kfree(tegra);
-}
-
static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
struct tegra_drm_file *fpriv;
@@ -311,6 +145,8 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
if (err < 0)
return err;
+ dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;
+
dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
if (!dest->cmdbuf.bo)
return -ENOENT;
@@ -1014,8 +850,6 @@ static int tegra_debugfs_init(struct drm_minor *minor)
static struct drm_driver tegra_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_ATOMIC | DRIVER_RENDER,
- .load = tegra_drm_load,
- .unload = tegra_drm_unload,
.open = tegra_drm_open,
.postclose = tegra_drm_postclose,
.lastclose = drm_fb_helper_lastclose,
@@ -1068,57 +902,63 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
return 0;
}
-struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
- bool shared)
+int host1x_client_iommu_attach(struct host1x_client *client)
{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = drm->dev_private;
struct iommu_group *group = NULL;
int err;
+ /*
+ * If the host1x client is already attached to an IOMMU domain that is
+ * not the shared IOMMU domain, don't try to attach it to a different
+ * domain. This allows using the IOMMU-backed DMA API.
+ */
+ if (domain && domain != tegra->domain)
+ return 0;
+
if (tegra->domain) {
group = iommu_group_get(client->dev);
if (!group) {
dev_err(client->dev, "failed to get IOMMU group\n");
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
- if (!shared || (shared && (group != tegra->group))) {
-#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
- if (client->dev->archdata.mapping) {
- struct dma_iommu_mapping *mapping =
- to_dma_iommu_mapping(client->dev);
- arm_iommu_detach_device(client->dev);
- arm_iommu_release_mapping(mapping);
- }
-#endif
+ if (domain != tegra->domain) {
err = iommu_attach_group(tegra->domain, group);
if (err < 0) {
iommu_group_put(group);
- return ERR_PTR(err);
+ return err;
}
-
- if (shared && !tegra->group)
- tegra->group = group;
}
+
+ tegra->use_explicit_iommu = true;
}
- return group;
+ client->group = group;
+
+ return 0;
}
-void host1x_client_iommu_detach(struct host1x_client *client,
- struct iommu_group *group)
+void host1x_client_iommu_detach(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = drm->dev_private;
+ struct iommu_domain *domain;
- if (group) {
- if (group == tegra->group) {
- iommu_detach_group(tegra->domain, group);
- tegra->group = NULL;
- }
+ if (client->group) {
+ /*
+ * Devices that are part of the same group may no longer be
+ * attached to a domain at this point because their group may
+ * have been detached by an earlier client.
+ */
+ domain = iommu_get_domain_for_dev(client->dev);
+ if (domain)
+ iommu_detach_group(tegra->domain, client->group);
- iommu_group_put(group);
+ iommu_group_put(client->group);
+ client->group = NULL;
}
}
@@ -1202,6 +1042,8 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
static int host1x_drm_probe(struct host1x_device *dev)
{
struct drm_driver *driver = &tegra_drm_driver;
+ struct iommu_domain *domain;
+ struct tegra_drm *tegra;
struct drm_device *drm;
int err;
@@ -1209,18 +1051,180 @@ static int host1x_drm_probe(struct host1x_device *dev)
if (IS_ERR(drm))
return PTR_ERR(drm);
+ tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
+ if (!tegra) {
+ err = -ENOMEM;
+ goto put;
+ }
+
+ /*
+ * If the Tegra DRM clients are backed by an IOMMU, push buffers are
+ * likely to be allocated beyond the 32-bit boundary if sufficient
+ * system memory is available. This is problematic on earlier Tegra
+ * generations where host1x supports a maximum of 32 address bits in
+ * the GATHER opcode. In this case, unless host1x is behind an IOMMU
+ * as well it won't be able to process buffers allocated beyond the
+	 * as well, it won't be able to process buffers allocated beyond the
+ *
+ * The DMA API will use bounce buffers in this case, so that could
+ * perhaps still be made to work, even if less efficient, but there
+ * is another catch: in order to perform cache maintenance on pages
+ * allocated for discontiguous buffers we need to map and unmap the
+ * SG table representing these buffers. This is fine for something
+ * small like a push buffer, but it exhausts the bounce buffer pool
+ * (typically on the order of a few MiB) for framebuffers (many MiB
+ * for any modern resolution).
+ *
+ * Work around this by making sure that Tegra DRM clients only use
+ * an IOMMU if the parent host1x also uses an IOMMU.
+ *
+ * Note that there's still a small gap here that we don't cover: if
+ * the DMA API is backed by an IOMMU there's no way to control which
+ * device is attached to an IOMMU and which isn't, except via wiring
+	 * up the device tree appropriately. This is considered a problem
+ * of integration, so care must be taken for the DT to be consistent.
+ */
+ domain = iommu_get_domain_for_dev(drm->dev->parent);
+
+ if (domain && iommu_present(&platform_bus_type)) {
+ tegra->domain = iommu_domain_alloc(&platform_bus_type);
+ if (!tegra->domain) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ err = iova_cache_get();
+ if (err < 0)
+ goto domain;
+ }
+
+ mutex_init(&tegra->clients_lock);
+ INIT_LIST_HEAD(&tegra->clients);
+
dev_set_drvdata(&dev->dev, drm);
+ drm->dev_private = tegra;
+ tegra->drm = drm;
+
+ drm_mode_config_init(drm);
- err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb", false);
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+
+ drm->mode_config.max_width = 4096;
+ drm->mode_config.max_height = 4096;
+
+ drm->mode_config.allow_fb_modifiers = true;
+
+ drm->mode_config.normalize_zpos = true;
+
+ drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
+ drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
+
+ err = tegra_drm_fb_prepare(drm);
if (err < 0)
- goto put;
+ goto config;
+
+ drm_kms_helper_poll_init(drm);
+
+ err = host1x_device_init(dev);
+ if (err < 0)
+ goto fbdev;
+
+ if (tegra->use_explicit_iommu) {
+ u64 carveout_start, carveout_end, gem_start, gem_end;
+ u64 dma_mask = dma_get_mask(&dev->dev);
+ dma_addr_t start, end;
+ unsigned long order;
+
+ start = tegra->domain->geometry.aperture_start & dma_mask;
+ end = tegra->domain->geometry.aperture_end & dma_mask;
+
+ gem_start = start;
+ gem_end = end - CARVEOUT_SZ;
+ carveout_start = gem_end + 1;
+ carveout_end = end;
+
+ order = __ffs(tegra->domain->pgsize_bitmap);
+ init_iova_domain(&tegra->carveout.domain, 1UL << order,
+ carveout_start >> order);
+
+ tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
+ tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
+
+ drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
+ mutex_init(&tegra->mm_lock);
+
+ DRM_DEBUG_DRIVER("IOMMU apertures:\n");
+ DRM_DEBUG_DRIVER(" GEM: %#llx-%#llx\n", gem_start, gem_end);
+ DRM_DEBUG_DRIVER(" Carveout: %#llx-%#llx\n", carveout_start,
+ carveout_end);
+ } else if (tegra->domain) {
+ iommu_domain_free(tegra->domain);
+ tegra->domain = NULL;
+ iova_cache_put();
+ }
+
+ if (tegra->hub) {
+ err = tegra_display_hub_prepare(tegra->hub);
+ if (err < 0)
+ goto device;
+ }
+
+ /*
+ * We don't use the drm_irq_install() helpers provided by the DRM
+ * core, so we need to set this manually in order to allow the
+ * DRM_IOCTL_WAIT_VBLANK to operate correctly.
+ */
+ drm->irq_enabled = true;
+
+ /* syncpoints are used for full 32-bit hardware VBLANK counters */
+ drm->max_vblank_count = 0xffffffff;
+
+ err = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (err < 0)
+ goto hub;
+
+ drm_mode_config_reset(drm);
+
+ err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb",
+ false);
+ if (err < 0)
+ goto hub;
+
+ err = tegra_drm_fb_init(drm);
+ if (err < 0)
+ goto hub;
err = drm_dev_register(drm, 0);
if (err < 0)
- goto put;
+ goto fb;
return 0;
+fb:
+ tegra_drm_fb_exit(drm);
+hub:
+ if (tegra->hub)
+ tegra_display_hub_cleanup(tegra->hub);
+device:
+ if (tegra->domain) {
+ mutex_destroy(&tegra->mm_lock);
+ drm_mm_takedown(&tegra->mm);
+ put_iova_domain(&tegra->carveout.domain);
+ iova_cache_put();
+ }
+
+ host1x_device_exit(dev);
+fbdev:
+ drm_kms_helper_poll_fini(drm);
+ tegra_drm_fb_free(drm);
+config:
+ drm_mode_config_cleanup(drm);
+domain:
+ if (tegra->domain)
+ iommu_domain_free(tegra->domain);
+free:
+ kfree(tegra);
put:
drm_dev_put(drm);
return err;
@@ -1229,8 +1233,29 @@ put:
static int host1x_drm_remove(struct host1x_device *dev)
{
struct drm_device *drm = dev_get_drvdata(&dev->dev);
+ struct tegra_drm *tegra = drm->dev_private;
+ int err;
drm_dev_unregister(drm);
+
+ drm_kms_helper_poll_fini(drm);
+ tegra_drm_fb_exit(drm);
+ drm_atomic_helper_shutdown(drm);
+ drm_mode_config_cleanup(drm);
+
+ err = host1x_device_exit(dev);
+ if (err < 0)
+ dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err);
+
+ if (tegra->domain) {
+ mutex_destroy(&tegra->mm_lock);
+ drm_mm_takedown(&tegra->mm);
+ put_iova_domain(&tegra->carveout.domain);
+ iova_cache_put();
+ iommu_domain_free(tegra->domain);
+ }
+
+ kfree(tegra);
drm_dev_put(drm);
return 0;
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 29911eff9ceb..d941553f7a3d 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -36,7 +36,7 @@ struct tegra_drm {
struct drm_device *drm;
struct iommu_domain *domain;
- struct iommu_group *group;
+ bool use_explicit_iommu;
struct mutex mm_lock;
struct drm_mm mm;
@@ -100,10 +100,8 @@ int tegra_drm_register_client(struct tegra_drm *tegra,
struct tegra_drm_client *client);
int tegra_drm_unregister_client(struct tegra_drm *tegra,
struct tegra_drm_client *client);
-struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
- bool shared);
-void host1x_client_iommu_detach(struct host1x_client *client,
- struct iommu_group *group);
+int host1x_client_iommu_attach(struct host1x_client *client);
+void host1x_client_iommu_detach(struct host1x_client *client);
int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
int tegra_drm_exit(struct tegra_drm *tegra);
@@ -155,17 +153,12 @@ void tegra_output_connector_destroy(struct drm_connector *connector);
void tegra_output_encoder_destroy(struct drm_encoder *encoder);
/* from dpaux.c */
-struct drm_dp_link;
-
struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np);
enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux);
int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output);
int drm_dp_aux_detach(struct drm_dp_aux *aux);
int drm_dp_aux_enable(struct drm_dp_aux *aux);
int drm_dp_aux_disable(struct drm_dp_aux *aux);
-int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding);
-int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link,
- u8 pattern);
/* from fb.c */
struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index f49ad36e24db..56edef06c48e 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -58,32 +58,17 @@ static int falcon_copy_chunk(struct falcon *falcon,
static void falcon_copy_firmware_image(struct falcon *falcon,
const struct firmware *firmware)
{
- u32 *firmware_vaddr = falcon->firmware.vaddr;
- dma_addr_t daddr;
+ u32 *virt = falcon->firmware.virt;
size_t i;
- int err;
/* copy the whole thing taking into account endianness */
for (i = 0; i < firmware->size / sizeof(u32); i++)
- firmware_vaddr[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
-
- /* ensure that caches are flushed and falcon can see the firmware */
- daddr = dma_map_single(falcon->dev, firmware_vaddr,
- falcon->firmware.size, DMA_TO_DEVICE);
- err = dma_mapping_error(falcon->dev, daddr);
- if (err) {
- dev_err(falcon->dev, "failed to map firmware: %d\n", err);
- return;
- }
- dma_sync_single_for_device(falcon->dev, daddr,
- falcon->firmware.size, DMA_TO_DEVICE);
- dma_unmap_single(falcon->dev, daddr, falcon->firmware.size,
- DMA_TO_DEVICE);
+ virt[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
}
static int falcon_parse_firmware_image(struct falcon *falcon)
{
- struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.vaddr;
+ struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.virt;
struct falcon_fw_os_header_v1 *os;
/* endian problems would show up right here */
@@ -104,7 +89,7 @@ static int falcon_parse_firmware_image(struct falcon *falcon)
return -EINVAL;
}
- os = falcon->firmware.vaddr + bin->os_header_offset;
+ os = falcon->firmware.virt + bin->os_header_offset;
falcon->firmware.bin_data.size = bin->os_size;
falcon->firmware.bin_data.offset = bin->os_data_offset;
@@ -125,6 +110,8 @@ int falcon_read_firmware(struct falcon *falcon, const char *name)
if (err < 0)
return err;
+ falcon->firmware.size = falcon->firmware.firmware->size;
+
return 0;
}
@@ -133,16 +120,6 @@ int falcon_load_firmware(struct falcon *falcon)
const struct firmware *firmware = falcon->firmware.firmware;
int err;
- falcon->firmware.size = firmware->size;
-
- /* allocate iova space for the firmware */
- falcon->firmware.vaddr = falcon->ops->alloc(falcon, firmware->size,
- &falcon->firmware.paddr);
- if (IS_ERR(falcon->firmware.vaddr)) {
- dev_err(falcon->dev, "DMA memory mapping failed\n");
- return PTR_ERR(falcon->firmware.vaddr);
- }
-
/* copy firmware image into local area. this also ensures endianness */
falcon_copy_firmware_image(falcon, firmware);
@@ -150,45 +127,26 @@ int falcon_load_firmware(struct falcon *falcon)
err = falcon_parse_firmware_image(falcon);
if (err < 0) {
dev_err(falcon->dev, "failed to parse firmware image\n");
- goto err_setup_firmware_image;
+ return err;
}
release_firmware(firmware);
falcon->firmware.firmware = NULL;
return 0;
-
-err_setup_firmware_image:
- falcon->ops->free(falcon, falcon->firmware.size,
- falcon->firmware.paddr, falcon->firmware.vaddr);
-
- return err;
}
int falcon_init(struct falcon *falcon)
{
- /* check mandatory ops */
- if (!falcon->ops || !falcon->ops->alloc || !falcon->ops->free)
- return -EINVAL;
-
- falcon->firmware.vaddr = NULL;
+ falcon->firmware.virt = NULL;
return 0;
}
void falcon_exit(struct falcon *falcon)
{
- if (falcon->firmware.firmware) {
+ if (falcon->firmware.firmware)
release_firmware(falcon->firmware.firmware);
- falcon->firmware.firmware = NULL;
- }
-
- if (falcon->firmware.vaddr) {
- falcon->ops->free(falcon, falcon->firmware.size,
- falcon->firmware.paddr,
- falcon->firmware.vaddr);
- falcon->firmware.vaddr = NULL;
- }
}
int falcon_boot(struct falcon *falcon)
@@ -197,7 +155,7 @@ int falcon_boot(struct falcon *falcon)
u32 value;
int err;
- if (!falcon->firmware.vaddr)
+ if (!falcon->firmware.virt)
return -EINVAL;
err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value,
@@ -210,7 +168,7 @@ int falcon_boot(struct falcon *falcon)
falcon_writel(falcon, 0, FALCON_DMACTL);
/* setup the address of the binary data so Falcon can access it later */
- falcon_writel(falcon, (falcon->firmware.paddr +
+ falcon_writel(falcon, (falcon->firmware.iova +
falcon->firmware.bin_data.offset) >> 8,
FALCON_DMATRFBASE);
diff --git a/drivers/gpu/drm/tegra/falcon.h b/drivers/gpu/drm/tegra/falcon.h
index 3d1243217410..c56ee32d92ee 100644
--- a/drivers/gpu/drm/tegra/falcon.h
+++ b/drivers/gpu/drm/tegra/falcon.h
@@ -74,15 +74,6 @@ struct falcon_fw_os_header_v1 {
u32 data_size;
};
-struct falcon;
-
-struct falcon_ops {
- void *(*alloc)(struct falcon *falcon, size_t size,
- dma_addr_t *paddr);
- void (*free)(struct falcon *falcon, size_t size,
- dma_addr_t paddr, void *vaddr);
-};
-
struct falcon_firmware_section {
unsigned long offset;
size_t size;
@@ -93,8 +84,9 @@ struct falcon_firmware {
const struct firmware *firmware;
/* Raw firmware data */
- dma_addr_t paddr;
- void *vaddr;
+ dma_addr_t iova;
+ dma_addr_t phys;
+ void *virt;
size_t size;
/* Parsed firmware information */
@@ -107,8 +99,6 @@ struct falcon {
/* Set by falcon client */
struct device *dev;
void __iomem *regs;
- const struct falcon_ops *ops;
- void *data;
struct falcon_firmware firmware;
};
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index e34325c83d28..7cea89f29a5c 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -269,10 +269,10 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
}
}
- drm->mode_config.fb_base = (resource_size_t)bo->paddr;
+ drm->mode_config.fb_base = (resource_size_t)bo->iova;
info->screen_base = (void __iomem *)bo->vaddr + offset;
info->screen_size = size;
- info->fix.smem_start = (unsigned long)(bo->paddr + offset);
+ info->fix.smem_start = (unsigned long)(bo->iova + offset);
info->fix.smem_len = size;
return 0;
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index fb7667c8dd4c..746dae32c484 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -27,17 +27,55 @@ static void tegra_bo_put(struct host1x_bo *bo)
drm_gem_object_put_unlocked(&obj->gem);
}
-static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
+static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
+ dma_addr_t *phys)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+ struct sg_table *sgt;
+ int err;
+
+ /*
+ * If we've manually mapped the buffer object through the IOMMU, make
+ * sure to return the IOVA address of our mapping.
+ */
+ if (phys && obj->mm) {
+ *phys = obj->iova;
+ return NULL;
+ }
+
+ /*
+ * If we don't have a mapping for this buffer yet, return an SG table
+ * so that host1x can do the mapping for us via the DMA API.
+ */
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
- *sgt = obj->sgt;
+ if (obj->pages) {
+ err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
+ 0, obj->gem.size, GFP_KERNEL);
+ if (err < 0)
+ goto free;
+ } else {
+ err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
+ obj->gem.size);
+ if (err < 0)
+ goto free;
+ }
- return obj->paddr;
+ return sgt;
+
+free:
+ kfree(sgt);
+ return ERR_PTR(err);
}
-static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
+ if (sgt) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ }
}
static void *tegra_bo_mmap(struct host1x_bo *bo)
@@ -133,9 +171,9 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
goto unlock;
}
- bo->paddr = bo->mm->start;
+ bo->iova = bo->mm->start;
- bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
+ bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
bo->sgt->nents, prot);
if (!bo->size) {
dev_err(tegra->drm->dev, "failed to map buffer\n");
@@ -161,7 +199,7 @@ static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
return 0;
mutex_lock(&tegra->mm_lock);
- iommu_unmap(tegra->domain, bo->paddr, bo->size);
+ iommu_unmap(tegra->domain, bo->iova, bo->size);
drm_mm_remove_node(bo->mm);
mutex_unlock(&tegra->mm_lock);
@@ -209,7 +247,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
sg_free_table(bo->sgt);
kfree(bo->sgt);
} else if (bo->vaddr) {
- dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
+ dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
}
}
@@ -264,7 +302,7 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
} else {
size_t size = bo->gem.size;
- bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
+ bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
GFP_KERNEL | __GFP_NOWARN);
if (!bo->vaddr) {
dev_err(drm->dev,
@@ -365,7 +403,7 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
goto detach;
}
- bo->paddr = sg_dma_address(bo->sgt->sgl);
+ bo->iova = sg_dma_address(bo->sgt->sgl);
}
bo->gem.import_attach = attach;
@@ -461,7 +499,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
- err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
+ err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
gem->size);
if (err < 0) {
drm_gem_vm_close(vma);
@@ -508,25 +546,18 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
return NULL;
if (bo->pages) {
- struct scatterlist *sg;
- unsigned int i;
-
- if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
- goto free;
-
- for_each_sg(sgt->sgl, sg, bo->num_pages, i)
- sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);
-
- if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+ if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
+ 0, gem->size, GFP_KERNEL) < 0)
goto free;
} else {
- if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+ if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
+ gem->size) < 0)
goto free;
-
- sg_dma_address(sgt->sgl) = bo->paddr;
- sg_dma_len(sgt->sgl) = gem->size;
}
+ if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+ goto free;
+
return sgt;
free:
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 83ffb1e14ca3..fafb5724499b 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -31,7 +31,7 @@ struct tegra_bo {
struct host1x_bo base;
unsigned long flags;
struct sg_table *sgt;
- dma_addr_t paddr;
+ dma_addr_t iova;
void *vaddr;
struct drm_mm_node *mm;
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 641299cc85b8..1fc4e56c7cc5 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -17,7 +17,6 @@ struct gr2d_soc {
};
struct gr2d {
- struct iommu_group *group;
struct tegra_drm_client client;
struct host1x_channel *channel;
struct clk *clk;
@@ -40,7 +39,7 @@ static int gr2d_init(struct host1x_client *client)
struct gr2d *gr2d = to_gr2d(drm);
int err;
- gr2d->channel = host1x_channel_request(client->dev);
+ gr2d->channel = host1x_channel_request(client);
if (!gr2d->channel)
return -ENOMEM;
@@ -51,9 +50,8 @@ static int gr2d_init(struct host1x_client *client)
goto put;
}
- gr2d->group = host1x_client_iommu_attach(client, false);
- if (IS_ERR(gr2d->group)) {
- err = PTR_ERR(gr2d->group);
+ err = host1x_client_iommu_attach(client);
+ if (err < 0) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
goto free;
}
@@ -67,7 +65,7 @@ static int gr2d_init(struct host1x_client *client)
return 0;
detach:
- host1x_client_iommu_detach(client, gr2d->group);
+ host1x_client_iommu_detach(client);
free:
host1x_syncpt_free(client->syncpts[0]);
put:
@@ -87,7 +85,7 @@ static int gr2d_exit(struct host1x_client *client)
if (err < 0)
return err;
- host1x_client_iommu_detach(client, gr2d->group);
+ host1x_client_iommu_detach(client);
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_put(gr2d->channel);
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 8b9a35b1cbb3..24fae0f64032 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -23,7 +23,6 @@ struct gr3d_soc {
};
struct gr3d {
- struct iommu_group *group;
struct tegra_drm_client client;
struct host1x_channel *channel;
struct clk *clk_secondary;
@@ -49,7 +48,7 @@ static int gr3d_init(struct host1x_client *client)
struct gr3d *gr3d = to_gr3d(drm);
int err;
- gr3d->channel = host1x_channel_request(client->dev);
+ gr3d->channel = host1x_channel_request(client);
if (!gr3d->channel)
return -ENOMEM;
@@ -60,9 +59,8 @@ static int gr3d_init(struct host1x_client *client)
goto put;
}
- gr3d->group = host1x_client_iommu_attach(client, false);
- if (IS_ERR(gr3d->group)) {
- err = PTR_ERR(gr3d->group);
+ err = host1x_client_iommu_attach(client);
+ if (err < 0) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
goto free;
}
@@ -76,7 +74,7 @@ static int gr3d_init(struct host1x_client *client)
return 0;
detach:
- host1x_client_iommu_detach(client, gr3d->group);
+ host1x_client_iommu_detach(client);
free:
host1x_syncpt_free(client->syncpts[0]);
put:
@@ -95,7 +93,7 @@ static int gr3d_exit(struct host1x_client *client)
if (err < 0)
return err;
- host1x_client_iommu_detach(client, gr3d->group);
+ host1x_client_iommu_detach(client);
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_put(gr3d->channel);
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 839b49c40e51..2b4082d0bc9e 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -413,7 +413,6 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
unsigned int zpos = plane->state->normalized_zpos;
struct drm_framebuffer *fb = plane->state->fb;
struct tegra_plane *p = to_tegra_plane(plane);
- struct tegra_bo *bo;
dma_addr_t base;
u32 value;
@@ -456,8 +455,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
/* disable compression */
tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
- bo = tegra_fb_get_plane(fb, 0);
- base = bo->paddr;
+ base = state->iova[0] + fb->offsets[0];
tegra_plane_writel(p, state->format, DC_WIN_COLOR_DEPTH);
tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
@@ -521,6 +519,8 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_shared_plane_atomic_check,
.atomic_update = tegra_shared_plane_atomic_update,
.atomic_disable = tegra_shared_plane_atomic_disable,
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index bdcaa4c7168c..34373734ff68 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -70,6 +70,11 @@ tegra_output_connector_detect(struct drm_connector *connector, bool force)
void tegra_output_connector_destroy(struct drm_connector *connector)
{
+ struct tegra_output *output = connector_to_output(connector);
+
+ if (output->cec)
+ cec_notifier_conn_unregister(output->cec);
+
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
@@ -163,18 +168,11 @@ int tegra_output_probe(struct tegra_output *output)
disable_irq(output->hpd_irq);
}
- output->cec = cec_notifier_get(output->dev);
- if (!output->cec)
- return -ENOMEM;
-
return 0;
}
void tegra_output_remove(struct tegra_output *output)
{
- if (output->cec)
- cec_notifier_put(output->cec);
-
if (output->hpd_gpio)
free_irq(output->hpd_irq, output);
@@ -184,6 +182,7 @@ void tegra_output_remove(struct tegra_output *output)
int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
{
+ int connector_type;
int err;
if (output->panel) {
@@ -199,6 +198,21 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
if (output->hpd_gpio)
enable_irq(output->hpd_irq);
+ connector_type = output->connector.connector_type;
+ /*
+	 * Create a CEC notifier for HDMI connectors.
+ */
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ struct cec_connector_info conn_info;
+
+ cec_fill_conn_info_from_drm(&conn_info, &output->connector);
+ output->cec = cec_notifier_conn_register(output->dev, NULL,
+ &conn_info);
+ if (!output->cec)
+ return -ENOMEM;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index 6bab71d6e81d..163b590be224 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -6,6 +6,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include "dc.h"
@@ -23,6 +24,7 @@ static void tegra_plane_reset(struct drm_plane *plane)
{
struct tegra_plane *p = to_tegra_plane(plane);
struct tegra_plane_state *state;
+ unsigned int i;
if (plane->state)
__drm_atomic_helper_plane_destroy_state(plane->state);
@@ -36,6 +38,9 @@ static void tegra_plane_reset(struct drm_plane *plane)
plane->state->plane = plane;
plane->state->zpos = p->index;
plane->state->normalized_zpos = p->index;
+
+ for (i = 0; i < 3; i++)
+ state->iova[i] = DMA_MAPPING_ERROR;
}
}
@@ -60,6 +65,11 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
for (i = 0; i < 2; i++)
copy->blending[i] = state->blending[i];
+ for (i = 0; i < 3; i++) {
+ copy->iova[i] = DMA_MAPPING_ERROR;
+ copy->sgt[i] = NULL;
+ }
+
return &copy->base;
}
@@ -95,6 +105,100 @@ const struct drm_plane_funcs tegra_plane_funcs = {
.format_mod_supported = tegra_plane_format_mod_supported,
};
+static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < state->base.fb->format->num_planes; i++) {
+ struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+
+ if (!dc->client.group) {
+ struct sg_table *sgt;
+
+ sgt = host1x_bo_pin(dc->dev, &bo->base, NULL);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto unpin;
+ }
+
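+ /*
+ * dma_map_sg() returns the number of mapped entries, so a
+ * return value of zero indicates failure.
+ */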
+ err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE);
+ if (err == 0) {
+ err = -ENOMEM;
+ goto unpin;
+ }
+
+ state->iova[i] = sg_dma_address(sgt->sgl);
+ state->sgt[i] = sgt;
+ } else {
+ state->iova[i] = bo->iova;
+ }
+ }
+
+ return 0;
+
+unpin:
+ dev_err(dc->dev, "failed to map plane %u: %d\n", i, err);
+
+ while (i--) {
+ struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+ struct sg_table *sgt = state->sgt[i];
+
+ dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+ host1x_bo_unpin(dc->dev, &bo->base, sgt);
+
+ state->iova[i] = DMA_MAPPING_ERROR;
+ state->sgt[i] = NULL;
+ }
+
+ return err;
+}
+
+static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
+{
+ unsigned int i;
+
+ for (i = 0; i < state->base.fb->format->num_planes; i++) {
+ struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+
+ if (!dc->client.group) {
+ struct sg_table *sgt = state->sgt[i];
+
+ if (sgt) {
+ dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE);
+ host1x_bo_unpin(dc->dev, &bo->base, sgt);
+ }
+ }
+
+ state->iova[i] = DMA_MAPPING_ERROR;
+ state->sgt[i] = NULL;
+ }
+}
+
+int tegra_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct tegra_dc *dc = to_tegra_dc(state->crtc);
+
+ if (!state->fb)
+ return 0;
+
+ drm_gem_fb_prepare_fb(plane, state);
+
+ return tegra_dc_pin(dc, to_tegra_plane_state(state));
+}
+
+void tegra_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct tegra_dc *dc = to_tegra_dc(state->crtc);
+
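+ /* state->crtc, and thus dc, is NULL when the plane is not bound to a CRTC */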
+ if (dc)
+ tegra_dc_unpin(dc, to_tegra_plane_state(state));
+}
+
int tegra_plane_state_add(struct tegra_plane *plane,
struct drm_plane_state *state)
{
diff --git a/drivers/gpu/drm/tegra/plane.h b/drivers/gpu/drm/tegra/plane.h
index 510c394e6d9a..a158a915109a 100644
--- a/drivers/gpu/drm/tegra/plane.h
+++ b/drivers/gpu/drm/tegra/plane.h
@@ -39,6 +39,9 @@ struct tegra_plane_legacy_blending_state {
struct tegra_plane_state {
struct drm_plane_state base;
+ struct sg_table *sgt[3];
+ dma_addr_t iova[3];
+
struct tegra_bo_tiling tiling;
u32 format;
u32 swap;
@@ -61,6 +64,11 @@ to_tegra_plane_state(struct drm_plane_state *state)
extern const struct drm_plane_funcs tegra_plane_funcs;
+int tegra_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state);
+void tegra_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
int tegra_plane_state_add(struct tegra_plane *plane,
struct drm_plane_state *state);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index e1669ada0a40..615cb319fa8b 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -25,6 +25,7 @@
#include <drm/drm_scdc_helper.h>
#include "dc.h"
+#include "dp.h"
#include "drm.h"
#include "hda.h"
#include "sor.h"
@@ -370,10 +371,11 @@ struct tegra_sor_regs {
};
struct tegra_sor_soc {
- bool supports_edp;
bool supports_lvds;
bool supports_hdmi;
bool supports_dp;
+ bool supports_audio;
+ bool supports_hdcp;
const struct tegra_sor_regs *regs;
bool has_nvdisplay;
@@ -382,6 +384,12 @@ struct tegra_sor_soc {
unsigned int num_settings;
const u8 *xbar_cfg;
+ const u8 *lane_map;
+
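+ /*
+ * DP drive parameter tables, indexed by post-cursor, voltage
+ * swing and pre-emphasis levels.
+ */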
+ const u8 (*voltage_swing)[4][4];
+ const u8 (*pre_emphasis)[4][4];
+ const u8 (*post_cursor)[4][4];
+ const u8 (*tx_pu)[4][4];
};
struct tegra_sor;
@@ -390,6 +398,8 @@ struct tegra_sor_ops {
const char *name;
int (*probe)(struct tegra_sor *sor);
int (*remove)(struct tegra_sor *sor);
+ void (*audio_enable)(struct tegra_sor *sor);
+ void (*audio_disable)(struct tegra_sor *sor);
};
struct tegra_sor {
@@ -412,6 +422,7 @@ struct tegra_sor {
u8 xbar_cfg[5];
+ struct drm_dp_link link;
struct drm_dp_aux *aux;
struct drm_info_list *debugfs_files;
@@ -514,10 +525,19 @@ static inline struct tegra_clk_sor_pad *to_pad(struct clk_hw *hw)
return container_of(hw, struct tegra_clk_sor_pad, hw);
}
-static const char * const tegra_clk_sor_pad_parents[] = {
- "pll_d2_out0", "pll_dp"
+static const char * const tegra_clk_sor_pad_parents[2][2] = {
+ { "pll_d_out0", "pll_dp" },
+ { "pll_d2_out0", "pll_dp" },
};
+/*
+ * Implementing ->set_parent() here isn't really required because the parent
+ * will be explicitly selected in the driver code via the DP_CLK_SEL mux in
+ * the SOR_CLK_CNTRL register. This is primarily for compatibility with the
+ * Tegra186 and later SoC generations where the BPMP implements this clock
+ * and doesn't expose the mux via the common clock framework.
+ */
+
static int tegra_clk_sor_pad_set_parent(struct clk_hw *hw, u8 index)
{
struct tegra_clk_sor_pad *pad = to_pad(hw);
@@ -586,8 +606,8 @@ static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor,
init.name = name;
init.flags = 0;
- init.parent_names = tegra_clk_sor_pad_parents;
- init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents);
+ init.parent_names = tegra_clk_sor_pad_parents[sor->index];
+ init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents[sor->index]);
init.ops = &tegra_clk_sor_pad_ops;
pad->hw.init = &init;
@@ -597,112 +617,340 @@ static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor,
return clk;
}
-static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
- struct drm_dp_link *link)
+static void tegra_sor_filter_rates(struct tegra_sor *sor)
{
+ struct drm_dp_link *link = &sor->link;
unsigned int i;
- u8 pattern;
+
+ /* Tegra only supports RBR, HBR and HBR2 */
+ for (i = 0; i < link->num_rates; i++) {
+ switch (link->rates[i]) {
+ case 1620000:
+ case 2700000:
+ case 5400000:
+ break;
+
+ default:
+ DRM_DEBUG_KMS("link rate %lu kHz not supported\n",
+ link->rates[i]);
+ link->rates[i] = 0;
+ break;
+ }
+ }
+
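+ /* compact the rate table, dropping the entries zeroed above */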
+ drm_dp_link_update_rates(link);
+}
+
+static int tegra_sor_power_up_lanes(struct tegra_sor *sor, unsigned int lanes)
+{
+ unsigned long timeout;
u32 value;
- int err;
- /* setup lane parameters */
- value = SOR_LANE_DRIVE_CURRENT_LANE3(0x40) |
- SOR_LANE_DRIVE_CURRENT_LANE2(0x40) |
- SOR_LANE_DRIVE_CURRENT_LANE1(0x40) |
- SOR_LANE_DRIVE_CURRENT_LANE0(0x40);
- tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
+ /*
+ * Clear or set the PD_TXD bit corresponding to each lane, depending
+ * on whether it is used or not.
+ */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value = SOR_LANE_PREEMPHASIS_LANE3(0x0f) |
- SOR_LANE_PREEMPHASIS_LANE2(0x0f) |
- SOR_LANE_PREEMPHASIS_LANE1(0x0f) |
- SOR_LANE_PREEMPHASIS_LANE0(0x0f);
- tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
+ if (lanes <= 2)
+ value &= ~(SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2]));
+ else
+ value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2]);
- value = SOR_LANE_POSTCURSOR_LANE3(0x00) |
- SOR_LANE_POSTCURSOR_LANE2(0x00) |
- SOR_LANE_POSTCURSOR_LANE1(0x00) |
- SOR_LANE_POSTCURSOR_LANE0(0x00);
- tegra_sor_writel(sor, value, SOR_LANE_POSTCURSOR0);
+ if (lanes <= 1)
+ value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]);
+ else
+ value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]);
- /* disable LVDS mode */
- tegra_sor_writel(sor, 0, SOR_LVDS);
+ if (lanes == 0)
+ value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]);
+ else
+ value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]);
+
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ /* start lane sequencer */
+ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
+ SOR_LANE_SEQ_CTL_POWER_STATE_UP;
+ tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
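+ /* wait up to 250 ms for the lane sequencer to complete */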
+ timeout = jiffies + msecs_to_jiffies(250);
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+ break;
+
+ usleep_range(250, 1000);
+ }
+
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int tegra_sor_power_down_lanes(struct tegra_sor *sor)
+{
+ unsigned long timeout;
+ u32 value;
+
+ /* power down all lanes */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value |= SOR_DP_PADCTL_TX_PU_ENABLE;
- value &= ~SOR_DP_PADCTL_TX_PU_MASK;
- value |= SOR_DP_PADCTL_TX_PU(2); /* XXX: don't hardcode? */
+ value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
+ SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+ /* start lane sequencer */
+ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
+ SOR_LANE_SEQ_CTL_POWER_STATE_DOWN;
+ tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+ break;
+
+ usleep_range(25, 100);
+ }
+
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void tegra_sor_dp_precharge(struct tegra_sor *sor, unsigned int lanes)
+{
+ u32 value;
+
+ /* pre-charge all used lanes */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value |= SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
- SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0;
+
+ if (lanes <= 2)
+ value &= ~(SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2]));
+ else
+ value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2]);
+
+ if (lanes <= 1)
+ value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]);
+ else
+ value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]);
+
+ if (lanes == 0)
+ value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]);
+ else
+ value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]);
+
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
- usleep_range(10, 100);
+ usleep_range(15, 100);
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+}
- err = drm_dp_aux_prepare(sor->aux, DP_SET_ANSI_8B10B);
- if (err < 0)
- return err;
+static void tegra_sor_dp_term_calibrate(struct tegra_sor *sor)
+{
+ u32 mask = 0x08, adj = 0, value;
+
+ /* enable pad calibration logic */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value |= SOR_PLL1_TMDS_TERM;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
+
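+ /*
+ * Successively approximate the termination adjustment value one
+ * bit at a time, keeping a bit only if the comparator output
+ * stays low.
+ */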
+ while (mask) {
+ adj |= mask;
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+ value |= SOR_PLL1_TMDS_TERMADJ(adj);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
- for (i = 0, value = 0; i < link->num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_NONE |
- SOR_DP_TPG_PATTERN_TRAIN1;
- value = (value << 8) | lane;
+ usleep_range(100, 200);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ if (value & SOR_PLL1_TERM_COMPOUT)
+ adj &= ~mask;
+
+ mask >>= 1;
}
- tegra_sor_writel(sor, value, SOR_DP_TPG);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+ value |= SOR_PLL1_TMDS_TERMADJ(adj);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
- pattern = DP_TRAINING_PATTERN_1;
+ /* disable pad calibration logic */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value |= SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+}
- err = drm_dp_aux_train(sor->aux, link, pattern);
- if (err < 0)
- return err;
+static int tegra_sor_dp_link_apply_training(struct drm_dp_link *link)
+{
+ struct tegra_sor *sor = container_of(link, struct tegra_sor, link);
+ u32 voltage_swing = 0, pre_emphasis = 0, post_cursor = 0;
+ const struct tegra_sor_soc *soc = sor->soc;
+ u32 pattern = 0, tx_pu = 0, value;
+ unsigned int i;
- value = tegra_sor_readl(sor, SOR_DP_SPARE0);
- value |= SOR_DP_SPARE_SEQ_ENABLE;
- value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
- value |= SOR_DP_SPARE_MACRO_SOR_CLK;
- tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+ for (value = 0, i = 0; i < link->lanes; i++) {
+ u8 vs = link->train.request.voltage_swing[i];
+ u8 pe = link->train.request.pre_emphasis[i];
+ u8 pc = link->train.request.post_cursor[i];
+ u8 shift = sor->soc->lane_map[i] << 3;
+
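+ /* each lane's settings occupy one byte in the per-lane registers */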
+ voltage_swing |= soc->voltage_swing[pc][vs][pe] << shift;
+ pre_emphasis |= soc->pre_emphasis[pc][vs][pe] << shift;
+ post_cursor |= soc->post_cursor[pc][vs][pe] << shift;
+
+ if (sor->soc->tx_pu[pc][vs][pe] > tx_pu)
+ tx_pu = sor->soc->tx_pu[pc][vs][pe];
+
+ switch (link->train.pattern) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ value = SOR_DP_TPG_SCRAMBLER_GALIOS |
+ SOR_DP_TPG_PATTERN_NONE;
+ break;
+
+ case DP_TRAINING_PATTERN_1:
+ value = SOR_DP_TPG_SCRAMBLER_NONE |
+ SOR_DP_TPG_PATTERN_TRAIN1;
+ break;
+
+ case DP_TRAINING_PATTERN_2:
+ value = SOR_DP_TPG_SCRAMBLER_NONE |
+ SOR_DP_TPG_PATTERN_TRAIN2;
+ break;
+
+ case DP_TRAINING_PATTERN_3:
+ value = SOR_DP_TPG_SCRAMBLER_NONE |
+ SOR_DP_TPG_PATTERN_TRAIN3;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (link->caps.channel_coding)
+ value |= SOR_DP_TPG_CHANNEL_CODING;
- for (i = 0, value = 0; i < link->num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_NONE |
- SOR_DP_TPG_PATTERN_TRAIN2;
- value = (value << 8) | lane;
+ pattern = pattern << 8 | value;
}
- tegra_sor_writel(sor, value, SOR_DP_TPG);
+ tegra_sor_writel(sor, voltage_swing, SOR_LANE_DRIVE_CURRENT0);
+ tegra_sor_writel(sor, pre_emphasis, SOR_LANE_PREEMPHASIS0);
- pattern = DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_2;
+ if (link->caps.tps3_supported)
+ tegra_sor_writel(sor, post_cursor, SOR_LANE_POSTCURSOR0);
- err = drm_dp_aux_train(sor->aux, link, pattern);
- if (err < 0)
- return err;
+ tegra_sor_writel(sor, pattern, SOR_DP_TPG);
- for (i = 0, value = 0; i < link->num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_GALIOS |
- SOR_DP_TPG_PATTERN_NONE;
- value = (value << 8) | lane;
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value &= ~SOR_DP_PADCTL_TX_PU_MASK;
+ value |= SOR_DP_PADCTL_TX_PU_ENABLE;
+ value |= SOR_DP_PADCTL_TX_PU(tx_pu);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ usleep_range(20, 100);
+
+ return 0;
+}
+
+static int tegra_sor_dp_link_configure(struct drm_dp_link *link)
+{
+ struct tegra_sor *sor = container_of(link, struct tegra_sor, link);
+ unsigned int rate, lanes;
+ u32 value;
+ int err;
+
+ rate = drm_dp_link_rate_to_bw_code(link->rate);
+ lanes = link->lanes;
+
+ /* configure link speed and lane count */
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
+ value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+ value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
+ value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
+
+ if (link->caps.enhanced_framing)
+ value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
+
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+
+ usleep_range(400, 1000);
+
+ /* configure load pulse position adjustment */
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value &= ~SOR_PLL1_LOADADJ_MASK;
+
+ switch (rate) {
+ case DP_LINK_BW_1_62:
+ value |= SOR_PLL1_LOADADJ(0x3);
+ break;
+
+ case DP_LINK_BW_2_7:
+ value |= SOR_PLL1_LOADADJ(0x4);
+ break;
+
+ case DP_LINK_BW_5_4:
+ value |= SOR_PLL1_LOADADJ(0x6);
+ break;
}
- tegra_sor_writel(sor, value, SOR_DP_TPG);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
- pattern = DP_TRAINING_PATTERN_DISABLE;
+ /* use alternate scrambler reset for eDP */
+ value = tegra_sor_readl(sor, SOR_DP_SPARE0);
- err = drm_dp_aux_train(sor->aux, link, pattern);
- if (err < 0)
+ if (link->edp == 0)
+ value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
+ else
+ value |= SOR_DP_SPARE_PANEL_INTERNAL;
+
+ tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+
+ err = tegra_sor_power_down_lanes(sor);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to power down lanes: %d\n", err);
+ return err;
+ }
+
+ /* power up and pre-charge lanes */
+ err = tegra_sor_power_up_lanes(sor, lanes);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to power up %u lane%s: %d\n",
+ lanes, (lanes != 1) ? "s" : "", err);
return err;
+ }
+
+ tegra_sor_dp_precharge(sor, lanes);
return 0;
}
+static const struct drm_dp_link_ops tegra_sor_dp_link_ops = {
+ .apply_training = tegra_sor_dp_link_apply_training,
+ .configure = tegra_sor_dp_link_configure,
+};
+
static void tegra_sor_super_update(struct tegra_sor *sor)
{
tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
@@ -912,11 +1160,11 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
u32 num_syms_per_line;
unsigned int i;
- if (!link_rate || !link->num_lanes || !pclk || !config->bits_per_pixel)
+ if (!link_rate || !link->lanes || !pclk || !config->bits_per_pixel)
return -EINVAL;
- output = link_rate * 8 * link->num_lanes;
input = pclk * config->bits_per_pixel;
+ output = link_rate * 8 * link->lanes;
if (input >= output)
return -ERANGE;
@@ -959,7 +1207,7 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
watermark = div_u64(watermark + params.error, f);
config->watermark = watermark + (config->bits_per_pixel / 8) + 2;
num_syms_per_line = (mode->hdisplay * config->bits_per_pixel) /
- (link->num_lanes * 8);
+ (link->lanes * 8);
if (config->watermark > 30) {
config->watermark = 30;
@@ -976,15 +1224,15 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
num = ((mode->htotal - mode->hdisplay) - 7) * link_rate;
config->hblank_symbols = div_u64(num, pclk);
- if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ if (link->caps.enhanced_framing)
config->hblank_symbols -= 3;
- config->hblank_symbols -= 12 / link->num_lanes;
+ config->hblank_symbols -= 12 / link->lanes;
/* compute the number of symbols per vertical blanking interval */
num = (mode->hdisplay - 25) * link_rate;
config->vblank_symbols = div_u64(num, pclk);
- config->vblank_symbols -= 36 / link->num_lanes + 4;
+ config->vblank_symbols -= 36 / link->lanes + 4;
dev_dbg(sor->dev, "blank symbols: H:%u V:%u\n", config->hblank_symbols,
config->vblank_symbols);
@@ -1200,29 +1448,6 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
return err;
}
- value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
- SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
- tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
-
- /* stop lane sequencer */
- value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
- SOR_LANE_SEQ_CTL_POWER_STATE_DOWN;
- tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
-
- timeout = jiffies + msecs_to_jiffies(250);
-
- while (time_before(jiffies, timeout)) {
- value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
- if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
- break;
-
- usleep_range(25, 100);
- }
-
- if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
- return -ETIMEDOUT;
-
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value |= SOR_PLL2_PORT_POWERDOWN;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
@@ -1584,403 +1809,6 @@ static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
.destroy = tegra_output_encoder_destroy,
};
-static void tegra_sor_edp_disable(struct drm_encoder *encoder)
-{
- struct tegra_output *output = encoder_to_output(encoder);
- struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- struct tegra_sor *sor = to_sor(output);
- u32 value;
- int err;
-
- if (output->panel)
- drm_panel_disable(output->panel);
-
- err = tegra_sor_detach(sor);
- if (err < 0)
- dev_err(sor->dev, "failed to detach SOR: %d\n", err);
-
- tegra_sor_writel(sor, 0, SOR_STATE1);
- tegra_sor_update(sor);
-
- /*
- * The following accesses registers of the display controller, so make
- * sure it's only executed when the output is attached to one.
- */
- if (dc) {
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value &= ~SOR_ENABLE(0);
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
- tegra_dc_commit(dc);
- }
-
- err = tegra_sor_power_down(sor);
- if (err < 0)
- dev_err(sor->dev, "failed to power down SOR: %d\n", err);
-
- if (sor->aux) {
- err = drm_dp_aux_disable(sor->aux);
- if (err < 0)
- dev_err(sor->dev, "failed to disable DP: %d\n", err);
- }
-
- err = tegra_io_pad_power_disable(sor->pad);
- if (err < 0)
- dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
-
- if (output->panel)
- drm_panel_unprepare(output->panel);
-
- pm_runtime_put(sor->dev);
-}
-
-#if 0
-static int calc_h_ref_to_sync(const struct drm_display_mode *mode,
- unsigned int *value)
-{
- unsigned int hfp, hsw, hbp, a = 0, b;
-
- hfp = mode->hsync_start - mode->hdisplay;
- hsw = mode->hsync_end - mode->hsync_start;
- hbp = mode->htotal - mode->hsync_end;
-
- pr_info("hfp: %u, hsw: %u, hbp: %u\n", hfp, hsw, hbp);
-
- b = hfp - 1;
-
- pr_info("a: %u, b: %u\n", a, b);
- pr_info("a + hsw + hbp = %u\n", a + hsw + hbp);
-
- if (a + hsw + hbp <= 11) {
- a = 1 + 11 - hsw - hbp;
- pr_info("a: %u\n", a);
- }
-
- if (a > b)
- return -EINVAL;
-
- if (hsw < 1)
- return -EINVAL;
-
- if (mode->hdisplay < 16)
- return -EINVAL;
-
- if (value) {
- if (b > a && a % 2)
- *value = a + 1;
- else
- *value = a;
- }
-
- return 0;
-}
-#endif
-
-static void tegra_sor_edp_enable(struct drm_encoder *encoder)
-{
- struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
- struct tegra_output *output = encoder_to_output(encoder);
- struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- struct tegra_sor *sor = to_sor(output);
- struct tegra_sor_config config;
- struct tegra_sor_state *state;
- struct drm_dp_link link;
- u8 rate, lanes;
- unsigned int i;
- int err = 0;
- u32 value;
-
- state = to_sor_state(output->connector.state);
-
- pm_runtime_get_sync(sor->dev);
-
- if (output->panel)
- drm_panel_prepare(output->panel);
-
- err = drm_dp_aux_enable(sor->aux);
- if (err < 0)
- dev_err(sor->dev, "failed to enable DP: %d\n", err);
-
- err = drm_dp_link_probe(sor->aux, &link);
- if (err < 0) {
- dev_err(sor->dev, "failed to probe eDP link: %d\n", err);
- return;
- }
-
- /* switch to safe parent clock */
- err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
- if (err < 0)
- dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
-
- memset(&config, 0, sizeof(config));
- config.bits_per_pixel = state->bpc * 3;
-
- err = tegra_sor_compute_config(sor, mode, &config, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to compute configuration: %d\n", err);
-
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
- value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
- usleep_range(20, 100);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll3);
- value |= SOR_PLL3_PLL_VDD_MODE_3V3;
- tegra_sor_writel(sor, value, sor->soc->regs->pll3);
-
- value = SOR_PLL0_ICHPMP(0xf) | SOR_PLL0_VCOCAP_RST |
- SOR_PLL0_PLLREG_LEVEL_V45 | SOR_PLL0_RESISTOR_EXT;
- tegra_sor_writel(sor, value, sor->soc->regs->pll0);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value |= SOR_PLL2_SEQ_PLLCAPPD;
- value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
- value |= SOR_PLL2_LVDS_ENABLE;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- value = SOR_PLL1_TERM_COMPOUT | SOR_PLL1_TMDS_TERM;
- tegra_sor_writel(sor, value, sor->soc->regs->pll1);
-
- while (true) {
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- if ((value & SOR_PLL2_SEQ_PLLCAPPD_ENFORCE) == 0)
- break;
-
- usleep_range(250, 1000);
- }
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
- value &= ~SOR_PLL2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- /*
- * power up
- */
-
- /* set safe link bandwidth (1.62 Gbps) */
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
- value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G1_62;
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
-
- /* step 1 */
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL2_PORT_POWERDOWN |
- SOR_PLL2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll0);
- value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
- tegra_sor_writel(sor, value, sor->soc->regs->pll0);
-
- value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
- tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
-
- /* step 2 */
- err = tegra_io_pad_power_enable(sor->pad);
- if (err < 0)
- dev_err(sor->dev, "failed to power on I/O pad: %d\n", err);
-
- usleep_range(5, 100);
-
- /* step 3 */
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- usleep_range(20, 100);
-
- /* step 4 */
- value = tegra_sor_readl(sor, sor->soc->regs->pll0);
- value &= ~SOR_PLL0_VCOPD;
- value &= ~SOR_PLL0_PWR;
- tegra_sor_writel(sor, value, sor->soc->regs->pll0);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- usleep_range(200, 1000);
-
- /* step 5 */
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- /* XXX not in TRM */
- for (value = 0, i = 0; i < 5; i++)
- value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->xbar_cfg[i]) |
- SOR_XBAR_CTRL_LINK1_XSEL(i, i);
-
- tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
- tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
-
- /* switch to DP parent clock */
- err = tegra_sor_set_parent_clock(sor, sor->clk_dp);
- if (err < 0)
- dev_err(sor->dev, "failed to set parent clock: %d\n", err);
-
- /* power DP lanes */
- value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
-
- if (link.num_lanes <= 2)
- value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2);
- else
- value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2;
-
- if (link.num_lanes <= 1)
- value &= ~SOR_DP_PADCTL_PD_TXD_1;
- else
- value |= SOR_DP_PADCTL_PD_TXD_1;
-
- if (link.num_lanes == 0)
- value &= ~SOR_DP_PADCTL_PD_TXD_0;
- else
- value |= SOR_DP_PADCTL_PD_TXD_0;
-
- tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
-
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
- value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
- value |= SOR_DP_LINKCTL_LANE_COUNT(link.num_lanes);
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
-
- /* start lane sequencer */
- value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
- SOR_LANE_SEQ_CTL_POWER_STATE_UP;
- tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
-
- while (true) {
- value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
- if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
- break;
-
- usleep_range(250, 1000);
- }
-
- /* set link bandwidth */
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
- value |= drm_dp_link_rate_to_bw_code(link.rate) << 2;
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
-
- tegra_sor_apply_config(sor, &config);
-
- /* enable link */
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
- value |= SOR_DP_LINKCTL_ENABLE;
- value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
-
- for (i = 0, value = 0; i < 4; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_GALIOS |
- SOR_DP_TPG_PATTERN_NONE;
- value = (value << 8) | lane;
- }
-
- tegra_sor_writel(sor, value, SOR_DP_TPG);
-
- /* enable pad calibration logic */
- value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value |= SOR_DP_PADCTL_PAD_CAL_PD;
- tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
-
- err = drm_dp_link_probe(sor->aux, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to probe eDP link: %d\n", err);
-
- err = drm_dp_link_power_up(sor->aux, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to power up eDP link: %d\n", err);
-
- err = drm_dp_link_configure(sor->aux, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to configure eDP link: %d\n", err);
-
- rate = drm_dp_link_rate_to_bw_code(link.rate);
- lanes = link.num_lanes;
-
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
- value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
-
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
- value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
- value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
-
- if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
- value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
-
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
-
- /* disable training pattern generator */
-
- for (i = 0; i < link.num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_GALIOS |
- SOR_DP_TPG_PATTERN_NONE;
- value = (value << 8) | lane;
- }
-
- tegra_sor_writel(sor, value, SOR_DP_TPG);
-
- err = tegra_sor_dp_train_fast(sor, &link);
- if (err < 0)
- dev_err(sor->dev, "DP fast link training failed: %d\n", err);
-
- dev_dbg(sor->dev, "fast link training succeeded\n");
-
- err = tegra_sor_power_up(sor, 250);
- if (err < 0)
- dev_err(sor->dev, "failed to power up SOR: %d\n", err);
-
- /* CSTM (LVDS, link A/B, upper) */
- value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
- SOR_CSTM_UPPER;
- tegra_sor_writel(sor, value, SOR_CSTM);
-
- /* use DP-A protocol */
- value = tegra_sor_readl(sor, SOR_STATE1);
- value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
- value |= SOR_STATE_ASY_PROTOCOL_DP_A;
- tegra_sor_writel(sor, value, SOR_STATE1);
-
- tegra_sor_mode_set(sor, mode, state);
-
- /* PWM setup */
- err = tegra_sor_setup_pwm(sor, 250);
- if (err < 0)
- dev_err(sor->dev, "failed to setup PWM: %d\n", err);
-
- tegra_sor_update(sor);
-
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value |= SOR_ENABLE(0);
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
- tegra_dc_commit(dc);
-
- err = tegra_sor_attach(sor);
- if (err < 0)
- dev_err(sor->dev, "failed to attach SOR: %d\n", err);
-
- err = tegra_sor_wakeup(sor);
- if (err < 0)
- dev_err(sor->dev, "failed to enable DC: %d\n", err);
-
- if (output->panel)
- drm_panel_enable(output->panel);
-}
-
static int
tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -2030,12 +1858,6 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
return 0;
}
-static const struct drm_encoder_helper_funcs tegra_sor_edp_helpers = {
- .disable = tegra_sor_edp_disable,
- .enable = tegra_sor_edp_enable,
- .atomic_check = tegra_sor_encoder_atomic_check,
-};
-
static inline u32 tegra_sor_hdmi_subpack(const u8 *ptr, size_t size)
{
u32 value = 0;
@@ -2160,6 +1982,15 @@ static void tegra_sor_audio_prepare(struct tegra_sor *sor)
{
u32 value;
+ /*
+ * Enable and unmask the HDA codec SCRATCH0 and SCRATCH1 register
+ * interrupts. These are used for interoperability between the HDA
+ * codec driver and the HDMI/DP driver.
+ */
+ value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0;
+ tegra_sor_writel(sor, value, SOR_INT_ENABLE);
+ tegra_sor_writel(sor, value, SOR_INT_MASK);
+
tegra_sor_write_eld(sor);
value = SOR_AUDIO_HDA_PRESENSE_ELDV | SOR_AUDIO_HDA_PRESENSE_PD;
@@ -2169,6 +2000,32 @@ static void tegra_sor_audio_prepare(struct tegra_sor *sor)
static void tegra_sor_audio_unprepare(struct tegra_sor *sor)
{
tegra_sor_writel(sor, 0, SOR_AUDIO_HDA_PRESENSE);
+ tegra_sor_writel(sor, 0, SOR_INT_MASK);
+ tegra_sor_writel(sor, 0, SOR_INT_ENABLE);
+}
+
+static void tegra_sor_audio_enable(struct tegra_sor *sor)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL);
+
+ /* select HDA audio input */
+ value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK);
+ value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA);
+
+ /* inject null samples, but only for 2-channel audio */
+ if (sor->format.channels != 2)
+ value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+ else
+ value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+
+ value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH;
+
+ tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL);
+
+ /* enable advertising HBR capability */
+ tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE);
}
static int tegra_sor_hdmi_enable_audio_infoframe(struct tegra_sor *sor)
@@ -2206,24 +2063,7 @@ static void tegra_sor_hdmi_audio_enable(struct tegra_sor *sor)
{
u32 value;
- value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL);
-
- /* select HDA audio input */
- value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK);
- value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA);
-
- /* inject null samples */
- if (sor->format.channels != 2)
- value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
- else
- value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
-
- value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH;
-
- tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL);
-
- /* enable advertising HBR capability */
- tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE);
+ tegra_sor_audio_enable(sor);
tegra_sor_writel(sor, 0, SOR_HDMI_ACR_CTRL);
@@ -2399,9 +2239,9 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
if (!sor->soc->has_nvdisplay)
- value &= ~(SOR1_TIMING_CYA | SOR_ENABLE(1));
- else
- value &= ~SOR_ENABLE(sor->index);
+ value &= ~SOR1_TIMING_CYA;
+
+ value &= ~SOR_ENABLE(sor->index);
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
@@ -2559,16 +2399,34 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
- /* switch to parent clock */
- err = clk_set_parent(sor->clk, sor->clk_parent);
+ /*
+ * Switch the pad clock to the DP clock. Note that we cannot actually
+ * do this because Tegra186 and later don't support clk_set_parent()
+ * on the sorX_pad_clkout clocks. We already do the equivalent above
+ * using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register.
+ */
+#if 0
+ err = clk_set_parent(sor->clk_pad, sor->clk_dp);
if (err < 0) {
- dev_err(sor->dev, "failed to set parent clock: %d\n", err);
+ dev_err(sor->dev, "failed to select pad parent clock: %d\n",
+ err);
return;
}
+#endif
+ /* switch the SOR clock to the pad clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
if (err < 0) {
- dev_err(sor->dev, "failed to set pad clock: %d\n", err);
+ dev_err(sor->dev, "failed to select SOR parent clock: %d\n",
+ err);
+ return;
+ }
+
+ /* switch the output clock to the parent pixel clock */
+ err = clk_set_parent(sor->clk, sor->clk_parent);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select output parent clock: %d\n",
+ err);
return;
}
@@ -2774,9 +2632,9 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
if (!sor->soc->has_nvdisplay)
- value |= SOR_ENABLE(1) | SOR1_TIMING_CYA;
- else
- value |= SOR_ENABLE(sor->index);
+ value |= SOR1_TIMING_CYA;
+
+ value |= SOR_ENABLE(sor->index);
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
@@ -2803,6 +2661,396 @@ static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
.atomic_check = tegra_sor_encoder_atomic_check,
};
+static void tegra_sor_dp_disable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_sor *sor = to_sor(output);
+ u32 value;
+ int err;
+
+ if (output->panel)
+ drm_panel_disable(output->panel);
+
+ /*
+ * Do not attempt to power down a DP link if we're not connected, since
+ * the AUX transactions would just time out.
+ */
+ if (output->connector.status != connector_status_disconnected) {
+ err = drm_dp_link_power_down(sor->aux, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power down link: %d\n",
+ err);
+ }
+
+ err = tegra_sor_detach(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to detach SOR: %d\n", err);
+
+ tegra_sor_writel(sor, 0, SOR_STATE1);
+ tegra_sor_update(sor);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~SOR_ENABLE(sor->index);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+ tegra_dc_commit(dc);
+
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+ value &= ~SOR_STATE_ASY_SUBOWNER_MASK;
+ value &= ~SOR_STATE_ASY_OWNER_MASK;
+ tegra_sor_writel(sor, value, SOR_STATE1);
+ tegra_sor_update(sor);
+
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set safe clock: %d\n", err);
+
+ err = tegra_sor_power_down(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power down SOR: %d\n", err);
+
+ err = tegra_io_pad_power_disable(sor->pad);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
+
+ err = drm_dp_aux_disable(sor->aux);
+ if (err < 0)
+ dev_err(sor->dev, "failed to disable DPAUX: %d\n", err);
+
+ if (output->panel)
+ drm_panel_unprepare(output->panel);
+
+ pm_runtime_put(sor->dev);
+}
+
+static void tegra_sor_dp_enable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_sor *sor = to_sor(output);
+ struct tegra_sor_config config;
+ struct tegra_sor_state *state;
+ struct drm_display_mode *mode;
+ struct drm_display_info *info;
+ unsigned int i;
+ u32 value;
+ int err;
+
+ state = to_sor_state(output->connector.state);
+ mode = &encoder->crtc->state->adjusted_mode;
+ info = &output->connector.display_info;
+
+ pm_runtime_get_sync(sor->dev);
+
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+
+ err = tegra_io_pad_power_enable(sor->pad);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power on LVDS rail: %d\n", err);
+
+ usleep_range(20, 100);
+
+ err = drm_dp_aux_enable(sor->aux);
+ if (err < 0)
+ dev_err(sor->dev, "failed to enable DPAUX: %d\n", err);
+
+ err = drm_dp_link_probe(sor->aux, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to probe DP link: %d\n", err);
+
+ tegra_sor_filter_rates(sor);
+
+ err = drm_dp_link_choose(&sor->link, mode, info);
+ if (err < 0)
+ dev_err(sor->dev, "failed to choose link: %d\n", err);
+
+ if (output->panel)
+ drm_panel_prepare(output->panel);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ usleep_range(20, 40);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll3);
+ value |= SOR_PLL3_PLL_VDD_MODE_3V3;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll3);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll0);
+ value &= ~(SOR_PLL0_VCOPD | SOR_PLL0_PWR);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+ value |= SOR_PLL2_SEQ_PLLCAPPD;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ usleep_range(200, 400);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
+ value &= ~SOR_PLL2_PORT_POWERDOWN;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
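+ /* internal panels (eDP) use the single-ended DP clock, external DP the differential one */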
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
+
+ if (output->panel)
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
+ else
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK;
+
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ usleep_range(200, 400);
+
+ value = tegra_sor_readl(sor, SOR_DP_SPARE0);
+ /* XXX not in TRM */
+ if (output->panel)
+ value |= SOR_DP_SPARE_PANEL_INTERNAL;
+ else
+ value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
+
+ value |= SOR_DP_SPARE_SEQ_ENABLE;
+ tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+
+ /* XXX not in TRM */
+ tegra_sor_writel(sor, 0, SOR_LVDS);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll0);
+ value &= ~SOR_PLL0_ICHPMP_MASK;
+ value &= ~SOR_PLL0_VCOCAP_MASK;
+ value |= SOR_PLL0_ICHPMP(0x1);
+ value |= SOR_PLL0_VCOCAP(0x3);
+ value |= SOR_PLL0_RESISTOR_EXT;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
+
+ /* XXX not in TRM */
+ for (value = 0, i = 0; i < 5; i++)
+ value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
+ SOR_XBAR_CTRL_LINK1_XSEL(i, i);
+
+ tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+ tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+
+ /*
+ * Switch the pad clock to the DP clock. Note that we cannot actually
+ * do this because Tegra186 and later don't support clk_set_parent()
+ * on the sorX_pad_clkout clocks. We already do the equivalent above
+ * using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register.
+ */
+#if 0
+ err = clk_set_parent(sor->clk_pad, sor->clk_parent);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select pad parent clock: %d\n",
+ err);
+ return;
+ }
+#endif
+
+ /* switch the SOR clock to the pad clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select SOR parent clock: %d\n",
+ err);
+ return;
+ }
+
+ /* switch the output clock to the parent pixel clock */
+ err = clk_set_parent(sor->clk, sor->clk_parent);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select output parent clock: %d\n",
+ err);
+ return;
+ }
+
+ /* use DP-A protocol */
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+ value |= SOR_STATE_ASY_PROTOCOL_DP_A;
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
+ /* enable port */
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+ value |= SOR_DP_LINKCTL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+
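+ /* calibrate pad termination before link training */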
+ tegra_sor_dp_term_calibrate(sor);
+
+ err = drm_dp_link_train(&sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "link training failed: %d\n", err);
+ else
+ dev_dbg(sor->dev, "link training succeeded\n");
+
+ err = drm_dp_link_power_up(sor->aux, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power up DP link: %d\n", err);
+
+ /* compute configuration */
+ memset(&config, 0, sizeof(config));
+ config.bits_per_pixel = state->bpc * 3;
+
+ err = tegra_sor_compute_config(sor, mode, &config, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to compute configuration: %d\n", err);
+
+ tegra_sor_apply_config(sor, &config);
+ tegra_sor_mode_set(sor, mode, state);
+
+ if (output->panel) {
+ /* CSTM (LVDS, link A/B, upper) */
+ value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
+ SOR_CSTM_UPPER;
+ tegra_sor_writel(sor, value, SOR_CSTM);
+
+ /* PWM setup */
+ err = tegra_sor_setup_pwm(sor, 250);
+ if (err < 0)
+ dev_err(sor->dev, "failed to setup PWM: %d\n", err);
+ }
+
+ tegra_sor_update(sor);
+
+ err = tegra_sor_power_up(sor, 250);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power up SOR: %d\n", err);
+
+ /* attach and wake up */
+ err = tegra_sor_attach(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to attach SOR: %d\n", err);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value |= SOR_ENABLE(sor->index);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_commit(dc);
+
+ err = tegra_sor_wakeup(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to wake up SOR: %d\n", err);
+
+ if (output->panel)
+ drm_panel_enable(output->panel);
+}
+
+static const struct drm_encoder_helper_funcs tegra_sor_dp_helpers = {
+ .disable = tegra_sor_dp_disable,
+ .enable = tegra_sor_dp_enable,
+ .atomic_check = tegra_sor_encoder_atomic_check,
+};
+
+static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
+{
+ int err;
+
+ sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
+ if (IS_ERR(sor->avdd_io_supply)) {
+ dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
+ PTR_ERR(sor->avdd_io_supply));
+ return PTR_ERR(sor->avdd_io_supply);
+ }
+
+ err = regulator_enable(sor->avdd_io_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
+ err);
+ return err;
+ }
+
+ sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
+ if (IS_ERR(sor->vdd_pll_supply)) {
+ dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
+ PTR_ERR(sor->vdd_pll_supply));
+ return PTR_ERR(sor->vdd_pll_supply);
+ }
+
+ err = regulator_enable(sor->vdd_pll_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
+ err);
+ return err;
+ }
+
+ sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
+ if (IS_ERR(sor->hdmi_supply)) {
+ dev_err(sor->dev, "cannot get HDMI supply: %ld\n",
+ PTR_ERR(sor->hdmi_supply));
+ return PTR_ERR(sor->hdmi_supply);
+ }
+
+ err = regulator_enable(sor->hdmi_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
+ return err;
+ }
+
+ INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work);
+
+ return 0;
+}
+
+static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
+{
+ regulator_disable(sor->hdmi_supply);
+ regulator_disable(sor->vdd_pll_supply);
+ regulator_disable(sor->avdd_io_supply);
+
+ return 0;
+}
+
+static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
+ .name = "HDMI",
+ .probe = tegra_sor_hdmi_probe,
+ .remove = tegra_sor_hdmi_remove,
+ .audio_enable = tegra_sor_hdmi_audio_enable,
+ .audio_disable = tegra_sor_hdmi_audio_disable,
+};
+
+static int tegra_sor_dp_probe(struct tegra_sor *sor)
+{
+ int err;
+
+ sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io-hdmi-dp");
+ if (IS_ERR(sor->avdd_io_supply))
+ return PTR_ERR(sor->avdd_io_supply);
+
+ err = regulator_enable(sor->avdd_io_supply);
+ if (err < 0)
+ return err;
+
+ sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-hdmi-dp-pll");
+ if (IS_ERR(sor->vdd_pll_supply))
+ return PTR_ERR(sor->vdd_pll_supply);
+
+ err = regulator_enable(sor->vdd_pll_supply);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int tegra_sor_dp_remove(struct tegra_sor *sor)
+{
+ regulator_disable(sor->vdd_pll_supply);
+ regulator_disable(sor->avdd_io_supply);
+
+ return 0;
+}
+
+static const struct tegra_sor_ops tegra_sor_dp_ops = {
+ .name = "DP",
+ .probe = tegra_sor_dp_probe,
+ .remove = tegra_sor_dp_remove,
+};
+
static int tegra_sor_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
@@ -2810,11 +3058,10 @@ static int tegra_sor_init(struct host1x_client *client)
struct tegra_sor *sor = host1x_client_to_sor(client);
int connector = DRM_MODE_CONNECTOR_Unknown;
int encoder = DRM_MODE_ENCODER_NONE;
- u32 value;
int err;
if (!sor->aux) {
- if (sor->soc->supports_hdmi) {
+ if (sor->ops == &tegra_sor_hdmi_ops) {
connector = DRM_MODE_CONNECTOR_HDMIA;
encoder = DRM_MODE_ENCODER_TMDS;
helpers = &tegra_sor_hdmi_helpers;
@@ -2823,14 +3070,18 @@ static int tegra_sor_init(struct host1x_client *client)
encoder = DRM_MODE_ENCODER_LVDS;
}
} else {
- if (sor->soc->supports_edp) {
+ if (sor->output.panel) {
connector = DRM_MODE_CONNECTOR_eDP;
encoder = DRM_MODE_ENCODER_TMDS;
- helpers = &tegra_sor_edp_helpers;
- } else if (sor->soc->supports_dp) {
+ helpers = &tegra_sor_dp_helpers;
+ } else {
connector = DRM_MODE_CONNECTOR_DisplayPort;
encoder = DRM_MODE_ENCODER_TMDS;
+ helpers = &tegra_sor_dp_helpers;
}
+
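+ /* wire up the SOR-specific link training and configuration callbacks */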
+ sor->link.ops = &tegra_sor_dp_link_ops;
+ sor->link.aux = sor->aux;
}
sor->output.dev = sor->dev;
@@ -2913,15 +3164,6 @@ static int tegra_sor_init(struct host1x_client *client)
if (err < 0)
return err;
- /*
- * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
- * is used for interoperability between the HDA codec driver and the
- * HDMI/DP driver.
- */
- value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0;
- tegra_sor_writel(sor, value, SOR_INT_ENABLE);
- tegra_sor_writel(sor, value, SOR_INT_MASK);
-
return 0;
}
@@ -2930,9 +3172,6 @@ static int tegra_sor_exit(struct host1x_client *client)
struct tegra_sor *sor = host1x_client_to_sor(client);
int err;
- tegra_sor_writel(sor, 0, SOR_INT_MASK);
- tegra_sor_writel(sor, 0, SOR_INT_ENABLE);
-
tegra_output_exit(&sor->output);
if (sor->aux) {
@@ -2955,75 +3194,6 @@ static const struct host1x_client_ops sor_client_ops = {
.exit = tegra_sor_exit,
};
-static const struct tegra_sor_ops tegra_sor_edp_ops = {
- .name = "eDP",
-};
-
-static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
-{
- int err;
-
- sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
- if (IS_ERR(sor->avdd_io_supply)) {
- dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
- PTR_ERR(sor->avdd_io_supply));
- return PTR_ERR(sor->avdd_io_supply);
- }
-
- err = regulator_enable(sor->avdd_io_supply);
- if (err < 0) {
- dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
- err);
- return err;
- }
-
- sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
- if (IS_ERR(sor->vdd_pll_supply)) {
- dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
- PTR_ERR(sor->vdd_pll_supply));
- return PTR_ERR(sor->vdd_pll_supply);
- }
-
- err = regulator_enable(sor->vdd_pll_supply);
- if (err < 0) {
- dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
- err);
- return err;
- }
-
- sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
- if (IS_ERR(sor->hdmi_supply)) {
- dev_err(sor->dev, "cannot get HDMI supply: %ld\n",
- PTR_ERR(sor->hdmi_supply));
- return PTR_ERR(sor->hdmi_supply);
- }
-
- err = regulator_enable(sor->hdmi_supply);
- if (err < 0) {
- dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
- return err;
- }
-
- INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work);
-
- return 0;
-}
-
-static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
-{
- regulator_disable(sor->hdmi_supply);
- regulator_disable(sor->vdd_pll_supply);
- regulator_disable(sor->avdd_io_supply);
-
- return 0;
-}
-
-static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
- .name = "HDMI",
- .probe = tegra_sor_hdmi_probe,
- .remove = tegra_sor_hdmi_remove,
-};
-
static const u8 tegra124_sor_xbar_cfg[5] = {
0, 1, 2, 3, 4
};
@@ -3043,14 +3213,161 @@ static const struct tegra_sor_regs tegra124_sor_regs = {
.dp_padctl2 = 0x73,
};
+/* Tegra124 and Tegra132 have lanes 0 and 2 swapped. */
+static const u8 tegra124_sor_lane_map[4] = {
+ 2, 1, 0, 3,
+};
+
+static const u8 tegra124_sor_voltage_swing[4][4][4] = {
+ {
+ { 0x13, 0x19, 0x1e, 0x28 },
+ { 0x1e, 0x25, 0x2d, },
+ { 0x28, 0x32, },
+ { 0x3c, },
+ }, {
+ { 0x12, 0x17, 0x1b, 0x25 },
+ { 0x1c, 0x23, 0x2a, },
+ { 0x25, 0x2f, },
+ { 0x39, }
+ }, {
+ { 0x12, 0x16, 0x1a, 0x22 },
+ { 0x1b, 0x20, 0x27, },
+ { 0x24, 0x2d, },
+ { 0x36, },
+ }, {
+ { 0x11, 0x14, 0x17, 0x1f },
+ { 0x19, 0x1e, 0x24, },
+ { 0x22, 0x2a, },
+ { 0x32, },
+ },
+};
+
+static const u8 tegra124_sor_pre_emphasis[4][4][4] = {
+ {
+ { 0x00, 0x09, 0x13, 0x25 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x0a, 0x14, 0x28 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00 },
+ }, {
+ { 0x00, 0x0a, 0x14, 0x28 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x0a, 0x14, 0x28 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00, },
+ },
+};
+
+static const u8 tegra124_sor_post_cursor[4][4][4] = {
+ {
+ { 0x00, 0x00, 0x00, 0x00 },
+ { 0x00, 0x00, 0x00, },
+ { 0x00, 0x00, },
+ { 0x00, },
+ }, {
+ { 0x02, 0x02, 0x04, 0x05 },
+ { 0x02, 0x04, 0x05, },
+ { 0x04, 0x05, },
+ { 0x05, },
+ }, {
+ { 0x04, 0x05, 0x08, 0x0b },
+ { 0x05, 0x09, 0x0b, },
+ { 0x08, 0x0a, },
+ { 0x0b, },
+ }, {
+ { 0x05, 0x09, 0x0b, 0x12 },
+ { 0x09, 0x0d, 0x12, },
+ { 0x0b, 0x0f, },
+ { 0x12, },
+ },
+};
+
+static const u8 tegra124_sor_tx_pu[4][4][4] = {
+ {
+ { 0x20, 0x30, 0x40, 0x60 },
+ { 0x30, 0x40, 0x60, },
+ { 0x40, 0x60, },
+ { 0x60, },
+ }, {
+ { 0x20, 0x20, 0x30, 0x50 },
+ { 0x30, 0x40, 0x50, },
+ { 0x40, 0x50, },
+ { 0x60, },
+ }, {
+ { 0x20, 0x20, 0x30, 0x40, },
+ { 0x30, 0x30, 0x40, },
+ { 0x40, 0x50, },
+ { 0x60, },
+ }, {
+ { 0x20, 0x20, 0x20, 0x40, },
+ { 0x30, 0x30, 0x40, },
+ { 0x40, 0x40, },
+ { 0x60, },
+ },
+};
+
static const struct tegra_sor_soc tegra124_sor = {
- .supports_edp = true,
.supports_lvds = true,
.supports_hdmi = false,
- .supports_dp = false,
+ .supports_dp = true,
+ .supports_audio = false,
+ .supports_hdcp = false,
.regs = &tegra124_sor_regs,
.has_nvdisplay = false,
.xbar_cfg = tegra124_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra124_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
+};
+
+static const u8 tegra132_sor_pre_emphasis[4][4][4] = {
+ {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x01, 0x0e, 0x1d, },
+ { 0x01, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00 },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ },
+};
+
+static const struct tegra_sor_soc tegra132_sor = {
+ .supports_lvds = true,
+ .supports_hdmi = false,
+ .supports_dp = true,
+ .supports_audio = false,
+ .supports_hdcp = false,
+ .regs = &tegra124_sor_regs,
+ .has_nvdisplay = false,
+ .xbar_cfg = tegra124_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra132_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra210_sor_regs = {
@@ -3068,33 +3385,50 @@ static const struct tegra_sor_regs tegra210_sor_regs = {
.dp_padctl2 = 0x73,
};
+static const u8 tegra210_sor_xbar_cfg[5] = {
+ 2, 1, 0, 3, 4
+};
+
+static const u8 tegra210_sor_lane_map[4] = {
+ 0, 1, 2, 3,
+};
+
static const struct tegra_sor_soc tegra210_sor = {
- .supports_edp = true,
.supports_lvds = false,
.supports_hdmi = false,
- .supports_dp = false,
+ .supports_dp = true,
+ .supports_audio = false,
+ .supports_hdcp = false,
+
.regs = &tegra210_sor_regs,
.has_nvdisplay = false,
- .xbar_cfg = tegra124_sor_xbar_cfg,
-};
-static const u8 tegra210_sor_xbar_cfg[5] = {
- 2, 1, 0, 3, 4
+ .xbar_cfg = tegra210_sor_xbar_cfg,
+ .lane_map = tegra210_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra124_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_soc tegra210_sor1 = {
- .supports_edp = false,
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
+ .supports_audio = true,
+ .supports_hdcp = true,
.regs = &tegra210_sor_regs,
.has_nvdisplay = false,
.num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
.settings = tegra210_sor_hdmi_defaults,
-
.xbar_cfg = tegra210_sor_xbar_cfg,
+ .lane_map = tegra210_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra124_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra186_sor_regs = {
@@ -3112,31 +3446,72 @@ static const struct tegra_sor_regs tegra186_sor_regs = {
.dp_padctl2 = 0x16a,
};
-static const struct tegra_sor_soc tegra186_sor = {
- .supports_edp = false,
- .supports_lvds = false,
- .supports_hdmi = false,
- .supports_dp = true,
-
- .regs = &tegra186_sor_regs,
- .has_nvdisplay = true,
+static const u8 tegra186_sor_voltage_swing[4][4][4] = {
+ {
+ { 0x13, 0x19, 0x1e, 0x28 },
+ { 0x1e, 0x25, 0x2d, },
+ { 0x28, 0x32, },
+ { 0x39, },
+ }, {
+ { 0x12, 0x16, 0x1b, 0x25 },
+ { 0x1c, 0x23, 0x2a, },
+ { 0x25, 0x2f, },
+ { 0x37, }
+ }, {
+ { 0x12, 0x16, 0x1a, 0x22 },
+ { 0x1b, 0x20, 0x27, },
+ { 0x24, 0x2d, },
+ { 0x35, },
+ }, {
+ { 0x11, 0x14, 0x17, 0x1f },
+ { 0x19, 0x1e, 0x24, },
+ { 0x22, 0x2a, },
+ { 0x32, },
+ },
+};
- .xbar_cfg = tegra124_sor_xbar_cfg,
+static const u8 tegra186_sor_pre_emphasis[4][4][4] = {
+ {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x01, 0x0e, 0x1d, },
+ { 0x01, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00 },
+ }, {
+ { 0x00, 0x08, 0x14, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ },
};
-static const struct tegra_sor_soc tegra186_sor1 = {
- .supports_edp = false,
+static const struct tegra_sor_soc tegra186_sor = {
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
+ .supports_audio = true,
+ .supports_hdcp = true,
.regs = &tegra186_sor_regs,
.has_nvdisplay = true,
.num_settings = ARRAY_SIZE(tegra186_sor_hdmi_defaults),
.settings = tegra186_sor_hdmi_defaults,
-
.xbar_cfg = tegra124_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra186_sor_voltage_swing,
+ .pre_emphasis = tegra186_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra194_sor_regs = {
@@ -3155,10 +3530,11 @@ static const struct tegra_sor_regs tegra194_sor_regs = {
};
static const struct tegra_sor_soc tegra194_sor = {
- .supports_edp = true,
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
+ .supports_audio = true,
+ .supports_hdcp = true,
.regs = &tegra194_sor_regs,
.has_nvdisplay = true,
@@ -3167,14 +3543,19 @@ static const struct tegra_sor_soc tegra194_sor = {
.settings = tegra194_sor_hdmi_defaults,
.xbar_cfg = tegra210_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra186_sor_voltage_swing,
+ .pre_emphasis = tegra186_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct of_device_id tegra_sor_of_match[] = {
{ .compatible = "nvidia,tegra194-sor", .data = &tegra194_sor },
- { .compatible = "nvidia,tegra186-sor1", .data = &tegra186_sor1 },
{ .compatible = "nvidia,tegra186-sor", .data = &tegra186_sor },
{ .compatible = "nvidia,tegra210-sor1", .data = &tegra210_sor1 },
{ .compatible = "nvidia,tegra210-sor", .data = &tegra210_sor },
+ { .compatible = "nvidia,tegra132-sor", .data = &tegra132_sor },
{ .compatible = "nvidia,tegra124-sor", .data = &tegra124_sor },
{ },
};
@@ -3200,6 +3581,11 @@ static int tegra_sor_parse_dt(struct tegra_sor *sor)
* earlier
*/
sor->pad = TEGRA_IO_PAD_HDMI_DP0 + sor->index;
+ } else {
+ if (!sor->soc->supports_audio)
+ sor->index = 0;
+ else
+ sor->index = 1;
}
err = of_property_read_u32_array(np, "nvidia,xbar-cfg", xbar_cfg, 5);
@@ -3234,9 +3620,11 @@ static irqreturn_t tegra_sor_irq(int irq, void *data)
tegra_hda_parse_format(format, &sor->format);
- tegra_sor_hdmi_audio_enable(sor);
+ if (sor->ops->audio_enable)
+ sor->ops->audio_enable(sor);
} else {
- tegra_sor_hdmi_audio_disable(sor);
+ if (sor->ops->audio_disable)
+ sor->ops->audio_disable(sor);
}
}
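Editor's note: the hunk above replaces direct calls to tegra_sor_hdmi_audio_enable()/_disable() with calls through the per-output ops table, guarded so outputs that do not implement audio are simply skipped. A minimal sketch of that guarded-vtable pattern (names are hypothetical, not from the patch):

#include <stdbool.h>
#include <stddef.h>

struct output_ops {
	void (*audio_enable)(void *ctx);	/* optional, may be NULL */
	void (*audio_disable)(void *ctx);	/* optional, may be NULL */
};

static void set_audio(const struct output_ops *ops, void *ctx, bool on)
{
	/* only call into the backend if it implements the hook */
	if (on && ops->audio_enable)
		ops->audio_enable(ctx);
	else if (!on && ops->audio_disable)
		ops->audio_disable(ctx);
}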
@@ -3273,6 +3661,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
if (!sor->aux)
return -EPROBE_DEFER;
+
+ sor->output.ddc = &sor->aux->ddc;
}
if (!sor->aux) {
@@ -3287,16 +3677,15 @@ static int tegra_sor_probe(struct platform_device *pdev)
return -ENODEV;
}
} else {
- if (sor->soc->supports_edp) {
- sor->ops = &tegra_sor_edp_ops;
- sor->pad = TEGRA_IO_PAD_LVDS;
- } else if (sor->soc->supports_dp) {
- dev_err(&pdev->dev, "DisplayPort not supported yet\n");
- return -ENODEV;
- } else {
- dev_err(&pdev->dev, "unknown (DP) support\n");
- return -ENODEV;
- }
+ np = of_parse_phandle(pdev->dev.of_node, "nvidia,panel", 0);
+ /*
+ * No need to keep this around since we only use it as a check
+ * to see if a panel is connected (eDP) or not (DP).
+ */
+ of_node_put(np);
+
+ sor->ops = &tegra_sor_dp_ops;
+ sor->pad = TEGRA_IO_PAD_LVDS;
}
err = tegra_sor_parse_dt(sor);
@@ -3451,6 +3840,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
* pad output clock.
*/
if (!sor->clk_pad) {
+ char *name;
+
err = pm_runtime_get_sync(&pdev->dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to get runtime PM: %d\n",
@@ -3458,8 +3849,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
- sor->clk_pad = tegra_clk_sor_pad_register(sor,
- "sor1_pad_clkout");
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "sor%u_pad_clkout", sor->index);
+ if (!name) {
+ err = -ENOMEM;
+ goto remove;
+ }
+
+ sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
pm_runtime_put(&pdev->dev);
}
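Editor's note: instead of the fixed "sor1_pad_clkout" string, the pad clock name is now derived from the SOR instance. A sketch of the devm_kasprintf() idiom used here, with index standing in for sor->index; the returned string is device-managed, so no explicit kfree() is needed and NULL signals allocation failure:

#include <linux/device.h>

char *name;

name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "sor%u_pad_clkout", index);
if (!name)
	return -ENOMEM;	/* otherwise freed automatically on driver detach */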
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index f8efd8be4b7c..00e09d5dca30 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -39,6 +39,7 @@
#define SOR_STATE_ASY_CRC_MODE_NON_ACTIVE (0x2 << 6)
#define SOR_STATE_ASY_CRC_MODE_COMPLETE (0x1 << 6)
#define SOR_STATE_ASY_CRC_MODE_ACTIVE (0x0 << 6)
+#define SOR_STATE_ASY_SUBOWNER_MASK (0x3 << 4)
#define SOR_STATE_ASY_OWNER_MASK 0xf
#define SOR_STATE_ASY_OWNER(x) (((x) & 0xf) << 0)
@@ -283,10 +284,12 @@
#define SOR_DP_PADCTL_CM_TXD_2 (1 << 6)
#define SOR_DP_PADCTL_CM_TXD_1 (1 << 5)
#define SOR_DP_PADCTL_CM_TXD_0 (1 << 4)
+#define SOR_DP_PADCTL_CM_TXD(x) (1 << (4 + (x)))
#define SOR_DP_PADCTL_PD_TXD_3 (1 << 3)
#define SOR_DP_PADCTL_PD_TXD_0 (1 << 2)
#define SOR_DP_PADCTL_PD_TXD_1 (1 << 1)
#define SOR_DP_PADCTL_PD_TXD_2 (1 << 0)
+#define SOR_DP_PADCTL_PD_TXD(x) (1 << (0 + (x)))
#define SOR_DP_PADCTL1 0x5d
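Editor's note: the new SOR_DP_PADCTL_CM_TXD(x)/SOR_DP_PADCTL_PD_TXD(x) macros parameterize the per-lane bits so they can be driven from a loop, with the per-SoC lane_map added earlier in this patch translating logical lanes to pad positions (note the fixed PD_TXD_* defines are not in lane order). An illustrative fragment under that assumption:

u32 value = 0;
unsigned int i;

/* accumulate the per-lane pad bits via the SoC's lane map */
for (i = 0; i < lane_count; i++)
	value |= SOR_DP_PADCTL_PD_TXD(lane_map[i]);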
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index cd0399fd8c63..9444ba183990 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -34,7 +34,6 @@ struct vic {
void __iomem *regs;
struct tegra_drm_client client;
struct host1x_channel *channel;
- struct iommu_domain *domain;
struct device *dev;
struct clk *clk;
struct reset_control *rst;
@@ -97,6 +96,9 @@ static int vic_runtime_suspend(struct device *dev)
static int vic_boot(struct vic *vic)
{
+#ifdef CONFIG_IOMMU_API
+ struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
+#endif
u32 fce_ucode_size, fce_bin_data_offset;
void *hdr;
int err = 0;
@@ -105,15 +107,14 @@ static int vic_boot(struct vic *vic)
return 0;
#ifdef CONFIG_IOMMU_API
- if (vic->config->supports_sid) {
- struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
+ if (vic->config->supports_sid && spec) {
u32 value;
value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) |
TRANSCFG_ATT(0, TRANSCFG_SID_HW);
vic_writel(vic, value, VIC_TFBIF_TRANSCFG);
- if (spec && spec->num_ids > 0) {
+ if (spec->num_ids > 0) {
value = spec->ids[0] & 0xffff;
vic_writel(vic, value, VIC_THI_STREAMID0);
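Editor's note: dev_iommu_fwspec_get() returns NULL for devices without an IOMMU binding, so hoisting it out of the conditional lets both checks collapse into one guard. A condensed sketch (program_stream_id() is a hypothetical stand-in for the VIC register writes):

#include <linux/iommu.h>

struct iommu_fwspec *spec = dev_iommu_fwspec_get(dev);

if (supports_sid && spec && spec->num_ids > 0)
	program_stream_id(spec->ids[0] & 0xffff);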
@@ -132,9 +133,9 @@ static int vic_boot(struct vic *vic)
if (err < 0)
return err;
- hdr = vic->falcon.firmware.vaddr;
+ hdr = vic->falcon.firmware.virt;
fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
- hdr = vic->falcon.firmware.vaddr +
+ hdr = vic->falcon.firmware.virt +
*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
@@ -142,7 +143,7 @@ static int vic_boot(struct vic *vic)
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
fce_ucode_size);
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
- (vic->falcon.firmware.paddr + fce_bin_data_offset)
+ (vic->falcon.firmware.iova + fce_bin_data_offset)
>> 8);
err = falcon_wait_idle(&vic->falcon);
@@ -157,48 +158,21 @@ static int vic_boot(struct vic *vic)
return 0;
}
-static void *vic_falcon_alloc(struct falcon *falcon, size_t size,
- dma_addr_t *iova)
-{
- struct tegra_drm *tegra = falcon->data;
-
- return tegra_drm_alloc(tegra, size, iova);
-}
-
-static void vic_falcon_free(struct falcon *falcon, size_t size,
- dma_addr_t iova, void *va)
-{
- struct tegra_drm *tegra = falcon->data;
-
- return tegra_drm_free(tegra, size, va, iova);
-}
-
-static const struct falcon_ops vic_falcon_ops = {
- .alloc = vic_falcon_alloc,
- .free = vic_falcon_free
-};
-
static int vic_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct iommu_group *group = iommu_group_get(client->dev);
struct drm_device *dev = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
- if (group && tegra->domain) {
- err = iommu_attach_group(tegra->domain, group);
- if (err < 0) {
- dev_err(vic->dev, "failed to attach to domain: %d\n",
- err);
- return err;
- }
-
- vic->domain = tegra->domain;
+ err = host1x_client_iommu_attach(client);
+ if (err < 0) {
+ dev_err(vic->dev, "failed to attach to domain: %d\n", err);
+ return err;
}
- vic->channel = host1x_channel_request(client->dev);
+ vic->channel = host1x_channel_request(client);
if (!vic->channel) {
err = -ENOMEM;
goto detach;
@@ -214,6 +188,12 @@ static int vic_init(struct host1x_client *client)
if (err < 0)
goto free_syncpt;
+ /*
+ * Inherit the DMA parameters (such as maximum segment size) from the
+ * parent device.
+ */
+ client->dev->dma_parms = client->parent->dma_parms;
+
return 0;
free_syncpt:
@@ -221,8 +201,7 @@ free_syncpt:
free_channel:
host1x_channel_put(vic->channel);
detach:
- if (group && tegra->domain)
- iommu_detach_group(tegra->domain, group);
+ host1x_client_iommu_detach(client);
return err;
}
@@ -230,22 +209,32 @@ detach:
static int vic_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct iommu_group *group = iommu_group_get(client->dev);
struct drm_device *dev = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
+ /* avoid a dangling pointer just in case this disappears */
+ client->dev->dma_parms = NULL;
+
err = tegra_drm_unregister_client(tegra, drm);
if (err < 0)
return err;
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_put(vic->channel);
-
- if (vic->domain) {
- iommu_detach_group(vic->domain, group);
- vic->domain = NULL;
+ host1x_client_iommu_detach(client);
+
+ if (client->group) {
+ dma_unmap_single(vic->dev, vic->falcon.firmware.phys,
+ vic->falcon.firmware.size, DMA_TO_DEVICE);
+ tegra_drm_free(tegra, vic->falcon.firmware.size,
+ vic->falcon.firmware.virt,
+ vic->falcon.firmware.iova);
+ } else {
+ dma_free_coherent(vic->dev, vic->falcon.firmware.size,
+ vic->falcon.firmware.virt,
+ vic->falcon.firmware.iova);
}
return 0;
@@ -258,25 +247,64 @@ static const struct host1x_client_ops vic_client_ops = {
static int vic_load_firmware(struct vic *vic)
{
+ struct host1x_client *client = &vic->client.base;
+ struct tegra_drm *tegra = vic->client.drm;
+ dma_addr_t iova;
+ size_t size;
+ void *virt;
int err;
- if (vic->falcon.data)
+ if (vic->falcon.firmware.virt)
return 0;
- vic->falcon.data = vic->client.drm;
-
err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
if (err < 0)
- goto cleanup;
+ return err;
+
+ size = vic->falcon.firmware.size;
+
+ if (!client->group) {
+ virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
+
+ err = dma_mapping_error(vic->dev, iova);
+ if (err < 0)
+ return err;
+ } else {
+ virt = tegra_drm_alloc(tegra, size, &iova);
+ }
+
+ vic->falcon.firmware.virt = virt;
+ vic->falcon.firmware.iova = iova;
err = falcon_load_firmware(&vic->falcon);
if (err < 0)
goto cleanup;
+ /*
+ * In this case we have received an IOVA from the shared domain, so we
+ * need to make sure to get the physical address so that the DMA API
+ * knows what memory pages to flush the cache for.
+ */
+ if (client->group) {
+ dma_addr_t phys;
+
+ phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);
+
+ err = dma_mapping_error(vic->dev, phys);
+ if (err < 0)
+ goto cleanup;
+
+ vic->falcon.firmware.phys = phys;
+ }
+
return 0;
cleanup:
- vic->falcon.data = NULL;
+ if (!client->group)
+ dma_free_coherent(vic->dev, size, virt, iova);
+ else
+ tegra_drm_free(tegra, size, virt, iova);
+
return err;
}
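Editor's note: vic_load_firmware() now picks between two strategies. Without a shared IOMMU domain (client->group == NULL) the firmware lives in a dma_alloc_coherent() buffer; otherwise it comes from the driver-managed domain via tegra_drm_alloc() and is additionally passed through dma_map_single() purely so the DMA API flushes the CPU cache. A sketch of that streaming-DMA flush, assuming only a generic struct device *dev and a CPU-filled buffer:

#include <linux/dma-mapping.h>

dma_addr_t phys;

/* the CPU has written the firmware image into virt; flush it for the device */
phys = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, phys))
	return -ENOMEM;

/* ... device consumes the buffer through its own IOVA ... */

dma_unmap_single(dev, phys, size, DMA_TO_DEVICE);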
@@ -374,6 +402,13 @@ static int vic_probe(struct platform_device *pdev)
struct vic *vic;
int err;
+ /* inherit DMA mask from host1x parent */
+ err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
if (!vic)
return -ENOMEM;
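Editor's note: a sketch of the mask inheritance added above. dma_coerce_mask_and_coherent() points dev->dma_mask at the coherent mask and sets both to the given value, here the parent's mask, since the child DMAs through the parent's bus path:

#include <linux/dma-mapping.h>

err = dma_coerce_mask_and_coherent(&pdev->dev, *pdev->dev.parent->dma_mask);
if (err < 0)
	return err;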
@@ -410,7 +445,6 @@ static int vic_probe(struct platform_device *pdev)
vic->falcon.dev = dev;
vic->falcon.regs = vic->regs;
- vic->falcon.ops = &vic_falcon_ops;
err = falcon_init(&vic->falcon);
if (err < 0)
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 03d0e2df6774..94fb1f593564 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -649,7 +649,7 @@ static void gm12u320_driver_release(struct drm_device *dev)
kfree(gm12u320);
}
-DEFINE_DRM_GEM_SHMEM_FOPS(gm12u320_fops);
+DEFINE_DRM_GEM_FOPS(gm12u320_fops);
static struct drm_driver gm12u320_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
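Editor's note: several drivers in this series (gm12u320, v3d, virtio) switch from DEFINE_DRM_GEM_SHMEM_FOPS to the generic DEFINE_DRM_GEM_FOPS, which works once mmap is routed through &drm_gem_object_funcs.mmap (see the v3d hunk below). Roughly, the macro expands to the following (paraphrased from drm_gem.h):

static const struct file_operations name = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
	.mmap		= drm_gem_mmap,	/* dispatches to the object's mmap hook */
};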
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index ea4d59eb8966..6050dc846894 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -51,7 +51,7 @@ struct ttm_agp_backend {
static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
- struct page *dummy_read_page = ttm->bdev->glob->dummy_read_page;
+ struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
struct drm_mm_node *node = bo_mem->mm_node;
struct agp_memory *mem;
int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c7f86499165f..8d91b0428af1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -51,6 +51,7 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj);
DEFINE_MUTEX(ttm_global_mutex);
unsigned ttm_bo_glob_use_count;
struct ttm_bo_global ttm_bo_glob;
+EXPORT_SYMBOL(ttm_bo_glob);
static struct attribute ttm_bo_count = {
.name = "bo_count",
@@ -148,23 +149,21 @@ static void ttm_bo_release_list(struct kref *list_kref)
{
struct ttm_buffer_object *bo =
container_of(list_kref, struct ttm_buffer_object, list_kref);
- struct ttm_bo_device *bdev = bo->bdev;
size_t acc_size = bo->acc_size;
BUG_ON(kref_read(&bo->list_kref));
BUG_ON(kref_read(&bo->kref));
- BUG_ON(atomic_read(&bo->cpu_writers));
BUG_ON(bo->mem.mm_node != NULL);
BUG_ON(!list_empty(&bo->lru));
BUG_ON(!list_empty(&bo->ddestroy));
ttm_tt_destroy(bo->ttm);
- atomic_dec(&bo->bdev->glob->bo_count);
+ atomic_dec(&ttm_bo_glob.bo_count);
dma_fence_put(bo->moving);
if (!ttm_bo_uses_embedded_gem_object(bo))
dma_resv_fini(&bo->base._resv);
mutex_destroy(&bo->wu_mutex);
bo->destroy(bo);
- ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
+ ttm_mem_global_free(&ttm_mem_glob, acc_size);
}
static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
@@ -185,25 +184,20 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
list_add_tail(&bo->lru, &man->lru[bo->priority]);
kref_get(&bo->list_kref);
- if (bo->ttm && !(bo->ttm->page_flags &
- (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
- list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
+ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
+ !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
+ TTM_PAGE_FLAG_SWAPPED))) {
+ list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
kref_get(&bo->list_kref);
}
}
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
-{
- ttm_bo_add_mem_to_lru(bo, &bo->mem);
-}
-EXPORT_SYMBOL(ttm_bo_add_to_lru);
-
static void ttm_bo_ref_bug(struct kref *list_kref)
{
BUG();
}
-void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
bool notify = false;
@@ -223,16 +217,6 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
bdev->driver->del_from_lru_notify(bo);
}
-void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_global *glob = bo->bdev->glob;
-
- spin_lock(&glob->lru_lock);
- ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
-}
-EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
-
static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
struct ttm_buffer_object *bo)
{
@@ -247,7 +231,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
dma_resv_assert_held(bo->base.resv);
ttm_bo_del_from_lru(bo);
- ttm_bo_add_to_lru(bo);
+ ttm_bo_add_mem_to_lru(bo, &bo->mem);
if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
switch (bo->mem.mem_type) {
@@ -310,7 +294,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
dma_resv_assert_held(pos->first->base.resv);
dma_resv_assert_held(pos->last->base.resv);
- lru = &pos->first->bdev->glob->swap_lru[i];
+ lru = &ttm_bo_glob.swap_lru[i];
list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
}
}
@@ -474,7 +458,6 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_global *glob = bdev->glob;
int ret;
ret = ttm_bo_individualize_resv(bo);
@@ -484,16 +467,16 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
*/
dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
30 * HZ);
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
goto error;
}
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
if (!ret) {
if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
if (bo->base.resv != &bo->base._resv)
dma_resv_unlock(&bo->base._resv);
@@ -511,7 +494,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
*/
if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
- ttm_bo_add_to_lru(bo);
+ ttm_bo_move_to_lru_tail(bo, NULL);
}
dma_resv_unlock(bo->base.resv);
@@ -522,7 +505,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
error:
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
@@ -545,7 +528,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait_gpu,
bool unlock_resv)
{
- struct ttm_bo_global *glob = bo->bdev->glob;
struct dma_resv *resv;
int ret;
@@ -564,7 +546,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
lret = dma_resv_wait_timeout_rcu(resv, true,
interruptible,
@@ -575,7 +557,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
else if (lret == 0)
return -EBUSY;
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
/*
* We raced, and lost, someone else holds the reservation now,
@@ -585,7 +567,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
* delayed destruction would succeed, so just return success
* here.
*/
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}
ret = 0;
@@ -594,7 +576,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
if (ret || unlikely(list_empty(&bo->ddestroy))) {
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return ret;
}
@@ -602,7 +584,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
list_del_init(&bo->ddestroy);
kref_put(&bo->list_kref, ttm_bo_ref_bug);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
ttm_bo_cleanup_memtype_use(bo);
if (unlock_resv)
@@ -617,7 +599,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
*/
static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
- struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_bo_global *glob = &ttm_bo_glob;
struct list_head removed;
bool empty;
@@ -841,13 +823,12 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
struct ww_acquire_ctx *ticket)
{
struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
- struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
bool locked = false;
unsigned i;
int ret;
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &man->lru[i], lru) {
bool busy;
@@ -878,11 +859,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
if (!bo) {
if (busy_bo)
- ttm_bo_get(busy_bo);
- spin_unlock(&glob->lru_lock);
+ kref_get(&busy_bo->list_kref);
+ spin_unlock(&ttm_bo_glob.lru_lock);
ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
if (busy_bo)
- ttm_bo_put(busy_bo);
+ kref_put(&busy_bo->list_kref, ttm_bo_release_list);
return ret;
}
@@ -895,17 +876,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
return ret;
}
- ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
ret = ttm_bo_evict(bo, ctx);
- if (locked) {
+ if (locked)
ttm_bo_unreserve(bo);
- } else {
- spin_lock(&glob->lru_lock);
- ttm_bo_add_to_lru(bo);
- spin_unlock(&glob->lru_lock);
- }
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
@@ -925,7 +900,8 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
*/
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
+ struct ttm_mem_reg *mem,
+ bool no_wait_gpu)
{
struct dma_fence *fence;
int ret;
@@ -934,19 +910,22 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
- if (fence) {
- dma_resv_add_shared_fence(bo->base.resv, fence);
+ if (!fence)
+ return 0;
- ret = dma_resv_reserve_shared(bo->base.resv, 1);
- if (unlikely(ret)) {
- dma_fence_put(fence);
- return ret;
- }
+ if (no_wait_gpu)
+ return -EBUSY;
+
+ dma_resv_add_shared_fence(bo->base.resv, fence);
- dma_fence_put(bo->moving);
- bo->moving = fence;
+ ret = dma_resv_reserve_shared(bo->base.resv, 1);
+ if (unlikely(ret)) {
+ dma_fence_put(fence);
+ return ret;
}
+ dma_fence_put(bo->moving);
+ bo->moving = fence;
return 0;
}
@@ -977,7 +956,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ret;
} while (1);
- return ttm_bo_add_move_fence(bo, man, mem);
+ return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
}
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -1067,12 +1046,10 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
mem->mem_type = mem_type;
mem->placement = cur_flags;
- if (bo->mem.mem_type < mem_type && !list_empty(&bo->lru)) {
- spin_lock(&bo->bdev->glob->lru_lock);
- ttm_bo_del_from_lru(bo);
- ttm_bo_add_mem_to_lru(bo, mem);
- spin_unlock(&bo->bdev->glob->lru_lock);
- }
+ spin_lock(&ttm_bo_glob.lru_lock);
+ ttm_bo_del_from_lru(bo);
+ ttm_bo_add_mem_to_lru(bo, mem);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}
@@ -1119,14 +1096,18 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (unlikely(ret))
goto error;
- if (mem->mm_node) {
- ret = ttm_bo_add_move_fence(bo, man, mem);
- if (unlikely(ret)) {
- (*man->func->put_node)(man, mem);
- goto error;
- }
- return 0;
+ if (!mem->mm_node)
+ continue;
+
+ ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
+ if (unlikely(ret)) {
+ (*man->func->put_node)(man, mem);
+ if (ret == -EBUSY)
+ continue;
+
+ goto error;
}
+ return 0;
}
for (i = 0; i < placement->num_busy_placement; ++i) {
@@ -1159,9 +1140,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
error:
if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
- spin_lock(&bo->bdev->glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, NULL);
- spin_unlock(&bo->bdev->glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
}
return ret;
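Editor's note: with ttm_bo_add_move_fence() now returning -EBUSY when a move fence is pending and ctx->no_wait_gpu is set, ttm_bo_mem_space() treats that as "skip this placement" rather than a hard failure. A schematic of the retry loop (try_placement() is a hypothetical condensation of the per-placement work):

int i, ret;

for (i = 0; i < num_placements; i++) {
	ret = try_placement(bo, &placements[i]);
	if (ret == -EBUSY)
		continue;	/* fence still pending, try the next placement */
	if (ret)
		return ret;	/* hard error */
	return 0;	/* placed successfully */
}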
@@ -1285,9 +1266,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *))
{
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
int ret = 0;
unsigned long num_pages;
- struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
bool locked;
ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
@@ -1314,7 +1295,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
kref_init(&bo->kref);
kref_init(&bo->list_kref);
- atomic_set(&bo->cpu_writers, 0);
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
@@ -1348,7 +1328,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
dma_resv_init(&bo->base._resv);
drm_vma_node_reset(&bo->base.vma_node);
}
- atomic_inc(&bo->bdev->glob->bo_count);
+ atomic_inc(&ttm_bo_glob.bo_count);
/*
* For ttm_bo_type_device buffers, allocate
@@ -1378,11 +1358,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
return ret;
}
- if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
- spin_lock(&bdev->glob->lru_lock);
- ttm_bo_add_to_lru(bo);
- spin_unlock(&bdev->glob->lru_lock);
- }
+ spin_lock(&ttm_bo_glob.lru_lock);
+ ttm_bo_move_to_lru_tail(bo, NULL);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return ret;
}
@@ -1480,7 +1458,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
.flags = TTM_OPT_FLAG_FORCE_ALLOC
};
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_bo_global *glob = &ttm_bo_glob;
struct dma_fence *fence;
int ret;
unsigned i;
@@ -1649,8 +1627,6 @@ static int ttm_bo_global_init(void)
goto out;
spin_lock_init(&glob->lru_lock);
- glob->mem_glob = &ttm_mem_glob;
- glob->mem_glob->bo_glob = glob;
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
if (unlikely(glob->dummy_read_page == NULL)) {
@@ -1674,10 +1650,10 @@ out:
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
+ struct ttm_bo_global *glob = &ttm_bo_glob;
int ret = 0;
unsigned i = TTM_NUM_MEM_TYPES;
struct ttm_mem_type_manager *man;
- struct ttm_bo_global *glob = bdev->glob;
while (i--) {
man = &bdev->man[i];
@@ -1746,7 +1722,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = mapping;
- bdev->glob = glob;
bdev->need_dma32 = need_dma32;
mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
@@ -1826,31 +1801,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_wait);
-int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
-{
- int ret = 0;
-
- /*
- * Using ttm_bo_reserve makes sure the lru lists are updated.
- */
-
- ret = ttm_bo_reserve(bo, true, no_wait, NULL);
- if (unlikely(ret != 0))
- return ret;
- ret = ttm_bo_wait(bo, true, no_wait);
- if (likely(ret == 0))
- atomic_inc(&bo->cpu_writers);
- ttm_bo_unreserve(bo);
- return ret;
-}
-EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
-
-void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
-{
- atomic_dec(&bo->cpu_writers);
-}
-EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
-
/**
* A buffer object shrink method that tries to swap out the first
* buffer object on the bo_global::swap_lru list.
@@ -1950,8 +1900,7 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
.no_wait_gpu = false
};
- while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
- ;
+ while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
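Editor's note: the recurring change through ttm_bo.c is dropping the bdev->glob indirection in favor of the ttm_bo_glob/ttm_mem_glob singletons, with ttm_bo_glob now exported for the files below. Call sites reduce to a pattern like:

extern struct ttm_bo_global ttm_bo_glob;	/* EXPORT_SYMBOL'd above */

spin_lock(&ttm_bo_glob.lru_lock);
/* ... LRU list manipulation ... */
spin_unlock(&ttm_bo_glob.lru_lock);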
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index fe81c565e7ef..6b0883a1776e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -102,7 +102,6 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
mutex_lock(&man->io_reserve_mutex);
return 0;
}
-EXPORT_SYMBOL(ttm_mem_io_lock);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
@@ -111,7 +110,6 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
mutex_unlock(&man->io_reserve_mutex);
}
-EXPORT_SYMBOL(ttm_mem_io_unlock);
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
@@ -153,7 +151,6 @@ retry:
}
return ret;
}
-EXPORT_SYMBOL(ttm_mem_io_reserve);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
@@ -169,7 +166,6 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
bdev->driver->io_mem_free(bdev, mem);
}
-EXPORT_SYMBOL(ttm_mem_io_free);
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
@@ -503,7 +499,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
* TODO: Explicit member copy would probably be better here.
*/
- atomic_inc(&bo->bdev->glob->bo_count);
+ atomic_inc(&ttm_bo_glob.bo_count);
INIT_LIST_HEAD(&fbo->base.ddestroy);
INIT_LIST_HEAD(&fbo->base.lru);
INIT_LIST_HEAD(&fbo->base.swap);
@@ -511,15 +507,16 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
mutex_init(&fbo->base.wu_mutex);
fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node);
- atomic_set(&fbo->base.cpu_writers, 0);
kref_init(&fbo->base.list_kref);
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
- fbo->base.base.resv = &fbo->base.base._resv;
- dma_resv_init(fbo->base.base.resv);
- ret = dma_resv_trylock(fbo->base.base.resv);
+ if (bo->base.resv == &bo->base._resv)
+ fbo->base.base.resv = &fbo->base.base._resv;
+
+ dma_resv_init(&fbo->base.base._resv);
+ ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
*new_obj = &fbo->base;
@@ -716,7 +713,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
if (ret)
return ret;
- dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
+ dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
/**
* If we're not moving to fixed memory, the TTM object
@@ -729,7 +726,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
else
bo->ttm = NULL;
- ttm_bo_unreserve(ghost_obj);
+ dma_resv_unlock(&ghost_obj->base._resv);
ttm_bo_put(ghost_obj);
}
@@ -772,7 +769,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
if (ret)
return ret;
- dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
+ dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
/**
* If we're not moving to fixed memory, the TTM object
@@ -785,7 +782,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
else
bo->ttm = NULL;
- ttm_bo_unreserve(ghost_obj);
+ dma_resv_unlock(&ghost_obj->base._resv);
ttm_bo_put(ghost_obj);
} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
@@ -841,7 +838,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
if (ret)
return ret;
- ret = dma_resv_copy_fences(ghost->base.resv, bo->base.resv);
+ ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
/* Last resort, wait for the BO to be idle when we are OOM */
if (ret)
ttm_bo_wait(bo, false, false);
@@ -850,7 +847,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
bo->mem.mem_type = TTM_PL_SYSTEM;
bo->ttm = NULL;
- ttm_bo_unreserve(ghost);
+ dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);
return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 4aa007edffb0..4b34a278d65b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -177,9 +177,9 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
}
if (bo->moving != moving) {
- spin_lock(&bdev->glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, NULL);
- spin_unlock(&bdev->glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
}
dma_fence_put(moving);
}
@@ -278,15 +278,13 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
else
ret = vmf_insert_pfn(&cvma, address, pfn);
- /*
- * Somebody beat us to this PTE or prefaulting to
- * an already populated PTE, or prefaulting error.
- */
-
- if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
- break;
- else if (unlikely(ret & VM_FAULT_ERROR))
- goto out_io_unlock;
+ /* Never error on prefaulted PTEs */
+ if (unlikely((ret & VM_FAULT_ERROR))) {
+ if (i == 0)
+ goto out_io_unlock;
+ else
+ break;
+ }
address += PAGE_SIZE;
if (unlikely(++page_offset >= page_last))
@@ -426,6 +424,28 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
return bo;
}
+static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_struct *vma)
+{
+ vma->vm_ops = &ttm_bo_vm_ops;
+
+ /*
+ * Note: We're transferring the bo reference to
+ * vma->vm_private_data here.
+ */
+
+ vma->vm_private_data = bo;
+
+ /*
+ * We'd like to use VM_PFNMAP on shared mappings, where
+ * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
+ * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
+ * bad for performance. Until that has been sorted out, use
+ * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
+ */
+ vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+}
+
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev)
{
@@ -449,24 +469,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
if (unlikely(ret != 0))
goto out_unref;
- vma->vm_ops = &ttm_bo_vm_ops;
-
- /*
- * Note: We're transferring the bo reference to
- * vma->vm_private_data here.
- */
-
- vma->vm_private_data = bo;
-
- /*
- * We'd like to use VM_PFNMAP on shared mappings, where
- * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
- * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
- * bad for performance. Until that has been sorted out, use
- * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
- */
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ ttm_bo_mmap_vma_setup(bo, vma);
return 0;
out_unref:
ttm_bo_put(bo);
@@ -474,17 +477,17 @@ out_unref:
}
EXPORT_SYMBOL(ttm_bo_mmap);
-int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
+int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
- if (vma->vm_pgoff != 0)
- return -EACCES;
-
ttm_bo_get(bo);
- vma->vm_ops = &ttm_bo_vm_ops;
- vma->vm_private_data = bo;
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND;
+ /*
+ * FIXME: &drm_gem_object_funcs.mmap is called with the fake offset
+ * removed. Add it back here until the rest of TTM works without it.
+ */
+ vma->vm_pgoff += drm_vma_node_start(&bo->base.vma_node);
+
+ ttm_bo_mmap_vma_setup(bo, vma);
return 0;
}
-EXPORT_SYMBOL(ttm_fbdev_mmap);
+EXPORT_SYMBOL(ttm_bo_mmap_obj);
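Editor's note: ttm_fbdev_mmap() is renamed to ttm_bo_mmap_obj() and the common vma setup is factored into ttm_bo_mmap_vma_setup(). A sketch of how a driver's &drm_gem_object_funcs.mmap hook might use the renamed helper (the surrounding driver function is hypothetical):

static int my_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
		container_of(obj, struct ttm_buffer_object, base);

	/* takes a bo reference and applies the VM_MIXEDMAP/VM_IO vma flags */
	return ttm_bo_mmap_obj(vma, bo);
}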
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 131dae8f4170..1797f04c0534 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -43,37 +43,22 @@ static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
}
}
-static void ttm_eu_del_from_lru_locked(struct list_head *list)
-{
- struct ttm_validate_buffer *entry;
-
- list_for_each_entry(entry, list, head) {
- struct ttm_buffer_object *bo = entry->bo;
- ttm_bo_del_from_lru(bo);
- }
-}
-
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
struct list_head *list)
{
struct ttm_validate_buffer *entry;
- struct ttm_bo_global *glob;
if (list_empty(list))
return;
- entry = list_first_entry(list, struct ttm_validate_buffer, head);
- glob = entry->bo->bdev->glob;
-
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- if (list_empty(&bo->lru))
- ttm_bo_add_to_lru(bo);
+ ttm_bo_move_to_lru_tail(bo, NULL);
dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
if (ticket)
ww_acquire_fini(ticket);
@@ -94,18 +79,14 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
- struct list_head *dups, bool del_lru)
+ struct list_head *dups)
{
- struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
int ret;
if (list_empty(list))
return 0;
- entry = list_first_entry(list, struct ttm_validate_buffer, head);
- glob = entry->bo->bdev->glob;
-
if (ticket)
ww_acquire_init(ticket, &reservation_ww_class);
@@ -113,12 +94,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct ttm_buffer_object *bo = entry->bo;
ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
- if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- dma_resv_unlock(bo->base.resv);
-
- ret = -EBUSY;
-
- } else if (ret == -EALREADY && dups) {
+ if (ret == -EALREADY && dups) {
struct ttm_validate_buffer *safe = entry;
entry = list_prev_entry(entry, head);
list_del(&safe->head);
@@ -173,11 +149,6 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
list_add(&entry->head, list);
}
- if (del_lru) {
- spin_lock(&glob->lru_lock);
- ttm_eu_del_from_lru_locked(list);
- spin_unlock(&glob->lru_lock);
- }
return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
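Editor's note: the del_lru parameter is gone because buffers now stay on the LRU while reserved, so callers drop their trailing true/false argument. A hypothetical updated caller:

ret = ttm_eu_reserve_buffers(&ticket, &validate_list, true, NULL);
if (ret)
	return ret;

/* ... validate and submit ... */

ttm_eu_fence_buffer_objects(&ticket, &validate_list, fence);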
@@ -187,30 +158,22 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
struct dma_fence *fence)
{
struct ttm_validate_buffer *entry;
- struct ttm_buffer_object *bo;
- struct ttm_bo_global *glob;
if (list_empty(list))
return;
- bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
- glob = bo->bdev->glob;
-
- spin_lock(&glob->lru_lock);
-
+ spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(entry, list, head) {
- bo = entry->bo;
+ struct ttm_buffer_object *bo = entry->bo;
+
if (entry->num_shared)
dma_resv_add_shared_fence(bo->base.resv, fence);
else
dma_resv_add_excl_fence(bo->base.resv, fence);
- if (list_empty(&bo->lru))
- ttm_bo_add_to_lru(bo);
- else
- ttm_bo_move_to_lru_tail(bo, NULL);
+ ttm_bo_move_to_lru_tail(bo, NULL);
dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
if (ticket)
ww_acquire_fini(ticket);
}
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 8617958b7ae6..acd63b70d814 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -275,7 +275,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
spin_unlock(&glob->lock);
- ret = ttm_bo_swapout(glob->bo_glob, ctx);
+ ret = ttm_bo_swapout(&ttm_bo_glob, ctx);
spin_lock(&glob->lock);
if (unlikely(ret != 0))
break;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 627f8dc91d0e..b40a4678c296 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -1028,7 +1028,7 @@ void ttm_page_alloc_fini(void)
static void
ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
{
- struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
unsigned i;
if (mem_count_update == 0)
@@ -1049,7 +1049,7 @@ put_pages:
int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
unsigned i;
int ret;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 7d78e6deac89..ff54e7609e8f 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -886,8 +886,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
struct ttm_operation_ctx *ctx)
{
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
struct ttm_tt *ttm = &ttm_dma->ttm;
- struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
unsigned long num_pages = ttm->num_pages;
struct dma_pool *pool;
struct dma_page *d_page;
@@ -991,8 +991,8 @@ EXPORT_SYMBOL_GPL(ttm_dma_populate);
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
struct ttm_tt *ttm = &ttm_dma->ttm;
- struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
struct dma_pool *pool;
struct dma_page *d_page, *next;
enum pool_type type;
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index a22b75a3a533..edd299ab53d8 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -58,7 +58,7 @@ static const struct drm_gem_object_funcs v3d_gem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .vm_ops = &drm_gem_shmem_vm_ops,
+ .mmap = drm_gem_shmem_mmap,
};
/* gem_create_object function for allocating a BO struct and doing
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index e94bf75368be..1a07462b4528 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -172,7 +172,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
kfree(v3d_priv);
}
-DEFINE_DRM_GEM_SHMEM_FOPS(v3d_drm_fops);
+DEFINE_DRM_GEM_FOPS(v3d_drm_fops);
/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
* protection between clients. Note that render nodes would be
diff --git a/drivers/gpu/drm/vboxvideo/Makefile b/drivers/gpu/drm/vboxvideo/Makefile
index 55d798c76b21..f2e968b5ffa6 100644
--- a/drivers/gpu/drm/vboxvideo/Makefile
+++ b/drivers/gpu/drm/vboxvideo/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
vboxvideo-y := hgsmi_base.o modesetting.o vbva_base.o \
- vbox_drv.o vbox_fb.o vbox_hgsmi.o vbox_irq.o vbox_main.o \
+ vbox_drv.o vbox_hgsmi.o vbox_irq.o vbox_main.o \
vbox_mode.o vbox_ttm.o
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo.o
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index 862db495d111..8512d970a09f 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -14,6 +14,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -32,10 +33,6 @@ static const struct pci_device_id pciidlist[] = {
};
MODULE_DEVICE_TABLE(pci, pciidlist);
-static const struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
- .fb_probe = vboxfb_create,
-};
-
static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct vbox_private *vbox;
@@ -79,20 +76,16 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto err_mode_fini;
- ret = drm_fb_helper_fbdev_setup(&vbox->ddev, &vbox->fb_helper,
- &vbox_fb_helper_funcs, 32,
- vbox->num_crtcs);
+ ret = drm_fbdev_generic_setup(&vbox->ddev, 32);
if (ret)
goto err_irq_fini;
ret = drm_dev_register(&vbox->ddev, 0);
if (ret)
- goto err_fbdev_fini;
+ goto err_irq_fini;
return 0;
-err_fbdev_fini:
- vbox_fbdev_fini(vbox);
err_irq_fini:
vbox_irq_fini(vbox);
err_mode_fini:
@@ -113,7 +106,6 @@ static void vbox_pci_remove(struct pci_dev *pdev)
struct vbox_private *vbox = pci_get_drvdata(pdev);
drm_dev_unregister(&vbox->ddev);
- vbox_fbdev_fini(vbox);
vbox_irq_fini(vbox);
vbox_mode_fini(vbox);
vbox_mm_fini(vbox);
@@ -189,10 +181,7 @@ static struct pci_driver vbox_pci_driver = {
#endif
};
-static const struct file_operations vbox_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(vbox_fops);
static struct drm_driver driver = {
.driver_features =
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h
index fb436ec760ea..87421903816c 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.h
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h
@@ -16,7 +16,6 @@
#include <linux/string.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
@@ -46,16 +45,9 @@
sizeof(struct hgsmi_host_flags))
#define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE
-struct vbox_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
-};
-
struct vbox_private {
/* Must be first; or we must define our own release callback */
struct drm_device ddev;
- struct drm_fb_helper fb_helper;
- struct vbox_framebuffer afb;
u8 __iomem *guest_heap;
u8 __iomem *vbva_buffers;
@@ -135,7 +127,6 @@ struct vbox_encoder {
#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
#define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
-#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base)
bool vbox_check_supported(u16 id);
int vbox_hw_init(struct vbox_private *vbox);
@@ -146,25 +137,9 @@ void vbox_mode_fini(struct vbox_private *vbox);
void vbox_report_caps(struct vbox_private *vbox);
-void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
- struct drm_clip_rect *rects,
- unsigned int num_rects);
-
-int vbox_framebuffer_init(struct vbox_private *vbox,
- struct vbox_framebuffer *vbox_fb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
-
-int vboxfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes);
-void vbox_fbdev_fini(struct vbox_private *vbox);
-
int vbox_mm_init(struct vbox_private *vbox);
void vbox_mm_fini(struct vbox_private *vbox);
-int vbox_gem_create(struct vbox_private *vbox,
- u32 size, bool iskernel, struct drm_gem_object **obj);
-
/* vbox_irq.c */
int vbox_irq_init(struct vbox_private *vbox);
void vbox_irq_fini(struct vbox_private *vbox);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_fb.c b/drivers/gpu/drm/vboxvideo/vbox_fb.c
deleted file mode 100644
index 8f74bcffc034..000000000000
--- a/drivers/gpu/drm/vboxvideo/vbox_fb.c
+++ /dev/null
@@ -1,149 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright (C) 2013-2017 Oracle Corporation
- * This file is based on ast_fb.c
- * Copyright 2012 Red Hat Inc.
- * Authors: Dave Airlie <airlied@redhat.com>
- * Michael Thayer <michael.thayer@oracle.com,
- */
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-#include <linux/sysrq.h>
-#include <linux/tty.h>
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-
-#include "vbox_drv.h"
-#include "vboxvideo.h"
-
-#ifdef CONFIG_DRM_KMS_FB_HELPER
-static struct fb_deferred_io vbox_defio = {
- .delay = HZ / 30,
- .deferred_io = drm_fb_helper_deferred_io,
-};
-#endif
-
-static struct fb_ops vboxfb_ops = {
- .owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- .fb_fillrect = drm_fb_helper_sys_fillrect,
- .fb_copyarea = drm_fb_helper_sys_copyarea,
- .fb_imageblit = drm_fb_helper_sys_imageblit,
-};
-
-int vboxfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct vbox_private *vbox =
- container_of(helper, struct vbox_private, fb_helper);
- struct pci_dev *pdev = vbox->ddev.pdev;
- struct drm_mode_fb_cmd2 mode_cmd;
- struct drm_framebuffer *fb;
- struct fb_info *info;
- struct drm_gem_object *gobj;
- struct drm_gem_vram_object *gbo;
- int size, ret;
- s64 gpu_addr;
- u32 pitch;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
- mode_cmd.pitches[0] = pitch;
-
- size = pitch * mode_cmd.height;
-
- ret = vbox_gem_create(vbox, size, true, &gobj);
- if (ret) {
- DRM_ERROR("failed to create fbcon backing object %d\n", ret);
- return ret;
- }
-
- ret = vbox_framebuffer_init(vbox, &vbox->afb, &mode_cmd, gobj);
- if (ret)
- return ret;
-
- gbo = drm_gem_vram_of_gem(gobj);
-
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info))
- return PTR_ERR(info);
-
- info->screen_size = size;
- info->screen_base = (char __iomem *)drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(info->screen_base))
- return PTR_ERR(info->screen_base);
-
- fb = &vbox->afb.base;
- helper->fb = fb;
-
- info->fbops = &vboxfb_ops;
-
- /*
- * This seems to be done for safety checking that the framebuffer
- * is not registered twice by different drivers.
- */
- info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
- info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
-
- drm_fb_helper_fill_info(info, helper, sizes);
-
- gpu_addr = drm_gem_vram_offset(gbo);
- if (gpu_addr < 0)
- return (int)gpu_addr;
- info->fix.smem_start = info->apertures->ranges[0].base + gpu_addr;
- info->fix.smem_len = vbox->available_vram_size - gpu_addr;
-
-#ifdef CONFIG_DRM_KMS_FB_HELPER
- info->fbdefio = &vbox_defio;
- fb_deferred_io_init(info);
-#endif
-
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
-
- DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height);
-
- return 0;
-}
-
-void vbox_fbdev_fini(struct vbox_private *vbox)
-{
- struct vbox_framebuffer *afb = &vbox->afb;
-
-#ifdef CONFIG_DRM_KMS_FB_HELPER
- if (vbox->fb_helper.fbdev && vbox->fb_helper.fbdev->fbdefio)
- fb_deferred_io_cleanup(vbox->fb_helper.fbdev);
-#endif
-
- drm_fb_helper_unregister_fbi(&vbox->fb_helper);
-
- if (afb->obj) {
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(afb->obj);
-
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
-
- drm_gem_object_put_unlocked(afb->obj);
- afb->obj = NULL;
- }
- drm_fb_helper_fini(&vbox->fb_helper);
-
- drm_framebuffer_unregister_private(&afb->base);
- drm_framebuffer_cleanup(&afb->base);
-}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index 02fa8277ff1e..9dcab115a261 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -11,22 +11,12 @@
#include <linux/vbox_err.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
#include "vbox_drv.h"
#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"
-static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);
-
- if (vbox_fb->obj)
- drm_gem_object_put_unlocked(vbox_fb->obj);
-
- drm_framebuffer_cleanup(fb);
- kfree(fb);
-}
-
void vbox_report_caps(struct vbox_private *vbox)
{
u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
@@ -38,87 +28,6 @@ void vbox_report_caps(struct vbox_private *vbox)
hgsmi_send_caps_info(vbox->guest_pool, caps);
}
-/* Send information about dirty rectangles to VBVA. */
-void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
- struct drm_clip_rect *rects,
- unsigned int num_rects)
-{
- struct vbox_private *vbox = fb->dev->dev_private;
- struct drm_display_mode *mode;
- struct drm_crtc *crtc;
- int crtc_x, crtc_y;
- unsigned int i;
-
- mutex_lock(&vbox->hw_mutex);
- list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
- if (crtc->primary->state->fb != fb)
- continue;
-
- mode = &crtc->state->mode;
- crtc_x = crtc->primary->state->src_x >> 16;
- crtc_y = crtc->primary->state->src_y >> 16;
-
- for (i = 0; i < num_rects; ++i) {
- struct vbva_cmd_hdr cmd_hdr;
- unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;
-
- if (rects[i].x1 > crtc_x + mode->hdisplay ||
- rects[i].y1 > crtc_y + mode->vdisplay ||
- rects[i].x2 < crtc_x ||
- rects[i].y2 < crtc_y)
- continue;
-
- cmd_hdr.x = (s16)rects[i].x1;
- cmd_hdr.y = (s16)rects[i].y1;
- cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1;
- cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1;
-
- if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
- vbox->guest_pool))
- continue;
-
- vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
- &cmd_hdr, sizeof(cmd_hdr));
- vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
- }
- }
- mutex_unlock(&vbox->hw_mutex);
-}
-
-static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int flags, unsigned int color,
- struct drm_clip_rect *rects,
- unsigned int num_rects)
-{
- vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);
-
- return 0;
-}
-
-static const struct drm_framebuffer_funcs vbox_fb_funcs = {
- .destroy = vbox_user_framebuffer_destroy,
- .dirty = vbox_user_framebuffer_dirty,
-};
-
-int vbox_framebuffer_init(struct vbox_private *vbox,
- struct vbox_framebuffer *vbox_fb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
-{
- int ret;
-
- drm_helper_mode_fill_fb_struct(&vbox->ddev, &vbox_fb->base, mode_cmd);
- vbox_fb->obj = obj;
- ret = drm_framebuffer_init(&vbox->ddev, &vbox_fb->base, &vbox_fb_funcs);
- if (ret) {
- DRM_ERROR("framebuffer init failed %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
static int vbox_accel_init(struct vbox_private *vbox)
{
struct vbva_buffer *vbva;
@@ -270,29 +179,3 @@ void vbox_hw_fini(struct vbox_private *vbox)
gen_pool_destroy(vbox->guest_pool);
pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
}
-
-int vbox_gem_create(struct vbox_private *vbox,
- u32 size, bool iskernel, struct drm_gem_object **obj)
-{
- struct drm_gem_vram_object *gbo;
- int ret;
-
- *obj = NULL;
-
- size = roundup(size, PAGE_SIZE);
- if (size == 0)
- return -EINVAL;
-
- gbo = drm_gem_vram_create(&vbox->ddev, &vbox->ddev.vram_mm->bdev,
- size, 0, false);
- if (IS_ERR(gbo)) {
- ret = PTR_ERR(gbo);
- if (ret != -ERESTARTSYS)
- DRM_ERROR("failed to allocate GEM object\n");
- return ret;
- }
-
- *obj = &gbo->bo.base;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index e1e48ba919eb..19612132c8a3 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -13,7 +13,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -133,7 +135,7 @@ static bool vbox_set_up_input_mapping(struct vbox_private *vbox)
if (!fb1) {
fb1 = fb;
- if (to_vbox_framebuffer(fb1) == &vbox->afb)
+ if (fb1 == vbox->ddev.fb_helper->fb)
break;
} else if (fb != fb1) {
single_framebuffer = false;
@@ -172,8 +174,7 @@ static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y)
{
- struct drm_gem_vram_object *gbo =
- drm_gem_vram_of_gem(to_vbox_framebuffer(fb)->obj);
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]);
struct vbox_private *vbox = crtc->dev->dev_private;
struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
bool needs_modeset = drm_atomic_crtc_needs_modeset(crtc->state);
@@ -283,10 +284,43 @@ static void vbox_primary_atomic_update(struct drm_plane *plane,
{
struct drm_crtc *crtc = plane->state->crtc;
struct drm_framebuffer *fb = plane->state->fb;
+ struct vbox_private *vbox = fb->dev->dev_private;
+ struct drm_mode_rect *clips;
+ uint32_t num_clips, i;
vbox_crtc_set_base_and_mode(crtc, fb,
plane->state->src_x >> 16,
plane->state->src_y >> 16);
+
+ /* Send information about dirty rectangles to VBVA. */
+
+ clips = drm_plane_get_damage_clips(plane->state);
+ num_clips = drm_plane_get_damage_clips_count(plane->state);
+
+ if (!num_clips)
+ return;
+
+ mutex_lock(&vbox->hw_mutex);
+
+ for (i = 0; i < num_clips; ++i, ++clips) {
+ struct vbva_cmd_hdr cmd_hdr;
+ unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;
+
+ cmd_hdr.x = (s16)clips->x1;
+ cmd_hdr.y = (s16)clips->y1;
+ cmd_hdr.w = (u16)clips->x2 - clips->x1;
+ cmd_hdr.h = (u16)clips->y2 - clips->y1;
+
+ if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
+ vbox->guest_pool))
+ continue;
+
+ vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
+ &cmd_hdr, sizeof(cmd_hdr));
+ vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
+ }
+
+ mutex_unlock(&vbox->hw_mutex);
}
static void vbox_primary_atomic_disable(struct drm_plane *plane,
@@ -300,35 +334,6 @@ static void vbox_primary_atomic_disable(struct drm_plane *plane,
old_state->src_y >> 16);
}
-static int vbox_primary_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
-{
- struct drm_gem_vram_object *gbo;
- int ret;
-
- if (!new_state->fb)
- return 0;
-
- gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(new_state->fb)->obj);
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret)
- DRM_WARN("Error %d pinning new fb, out of video mem?\n", ret);
-
- return ret;
-}
-
-static void vbox_primary_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!old_state->fb)
- return;
-
- gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(old_state->fb)->obj);
- drm_gem_vram_unpin(gbo);
-}
-
static int vbox_cursor_atomic_check(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
@@ -386,8 +391,7 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
container_of(plane->dev, struct vbox_private, ddev);
struct vbox_crtc *vbox_crtc = to_vbox_crtc(plane->state->crtc);
struct drm_framebuffer *fb = plane->state->fb;
- struct drm_gem_vram_object *gbo =
- drm_gem_vram_of_gem(to_vbox_framebuffer(fb)->obj);
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]);
u32 width = plane->state->crtc_w;
u32 height = plane->state->crtc_h;
size_t data_size, mask_size;
@@ -459,30 +463,6 @@ static void vbox_cursor_atomic_disable(struct drm_plane *plane,
mutex_unlock(&vbox->hw_mutex);
}
-static int vbox_cursor_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!new_state->fb)
- return 0;
-
- gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(new_state->fb)->obj);
- return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
-}
-
-static void vbox_cursor_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!plane->state->fb)
- return;
-
- gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(plane->state->fb)->obj);
- drm_gem_vram_unpin(gbo);
-}
-
static const u32 vbox_cursor_plane_formats[] = {
DRM_FORMAT_ARGB8888,
};
@@ -491,8 +471,8 @@ static const struct drm_plane_helper_funcs vbox_cursor_helper_funcs = {
.atomic_check = vbox_cursor_atomic_check,
.atomic_update = vbox_cursor_atomic_update,
.atomic_disable = vbox_cursor_atomic_disable,
- .prepare_fb = vbox_cursor_prepare_fb,
- .cleanup_fb = vbox_cursor_cleanup_fb,
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
};
static const struct drm_plane_funcs vbox_cursor_plane_funcs = {
@@ -513,8 +493,8 @@ static const struct drm_plane_helper_funcs vbox_primary_helper_funcs = {
.atomic_check = vbox_primary_atomic_check,
.atomic_update = vbox_primary_atomic_update,
.atomic_disable = vbox_primary_atomic_disable,
- .prepare_fb = vbox_primary_prepare_fb,
- .cleanup_fb = vbox_primary_cleanup_fb,
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
};
static const struct drm_plane_funcs vbox_primary_plane_funcs = {
@@ -856,40 +836,8 @@ static int vbox_connector_init(struct drm_device *dev,
return 0;
}
-static struct drm_framebuffer *vbox_user_framebuffer_create(
- struct drm_device *dev,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct vbox_private *vbox =
- container_of(dev, struct vbox_private, ddev);
- struct drm_gem_object *obj;
- struct vbox_framebuffer *vbox_fb;
- int ret = -ENOMEM;
-
- obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
- if (!obj)
- return ERR_PTR(-ENOENT);
-
- vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
- if (!vbox_fb)
- goto err_unref_obj;
-
- ret = vbox_framebuffer_init(vbox, vbox_fb, mode_cmd, obj);
- if (ret)
- goto err_free_vbox_fb;
-
- return &vbox_fb->base;
-
-err_free_vbox_fb:
- kfree(vbox_fb);
-err_unref_obj:
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR(ret);
-}
-
static const struct drm_mode_config_funcs vbox_mode_funcs = {
- .fb_create = vbox_user_framebuffer_create,
+ .fb_create = drm_gem_fb_create_with_dirty,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
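The vboxvideo hunks above move the driver onto generic damage tracking: framebuffers are created with drm_gem_fb_create_with_dirty(), and the primary plane update walks the damage clips attached to the atomic plane state instead of a driver-private dirty path. A minimal sketch of that consumption pattern, assuming a hypothetical my_flush_rect() callback (the clip helpers are declared in <drm/drm_damage_helper.h>):

static void my_plane_atomic_update(struct drm_plane *plane,
				   struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = plane->state;
	struct drm_mode_rect *clips = drm_plane_get_damage_clips(state);
	uint32_t i, num_clips = drm_plane_get_damage_clips_count(state);

	/* num_clips == 0 means no damage was reported for this commit. */
	for (i = 0; i < num_clips; i++, clips++)
		my_flush_rect(plane, clips->x1, clips->y1,
			      clips->x2 - clips->x1,
			      clips->y2 - clips->y1);
}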
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 0853b980bcb3..1c62c6c9244b 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -398,10 +398,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL);
- frame.avi.right_bar = cstate->tv.margins.right;
- frame.avi.left_bar = cstate->tv.margins.left;
- frame.avi.top_bar = cstate->tv.margins.top;
- frame.avi.bottom_bar = cstate->tv.margins.bottom;
+ drm_hdmi_avi_infoframe_bars(&frame.avi, cstate);
vc4_hdmi_write_infoframe(encoder, &frame);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index a5cb58754f7d..8dee698c90ff 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -184,7 +184,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_AUTHOR("Alon Levy");
-DEFINE_DRM_GEM_SHMEM_FOPS(virtio_gpu_driver_fops);
+DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 314e02f94d9c..0b56ba005e25 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -267,8 +267,8 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
uint32_t resource_id);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
- __le32 width, __le32 height,
- __le32 x, __le32 y,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 0b3cdb0d83b0..2f5773e43557 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -155,16 +155,15 @@ int virtio_gpu_init(struct drm_device *dev)
#ifdef __LITTLE_ENDIAN
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
vgdev->has_virgl_3d = true;
- DRM_INFO("virgl 3d acceleration %s\n",
- vgdev->has_virgl_3d ? "enabled" : "not supported by host");
-#else
- DRM_INFO("virgl 3d acceleration not supported by guest\n");
#endif
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
vgdev->has_edid = true;
- DRM_INFO("EDID support available.\n");
}
+ DRM_INFO("features: %cvirgl %cedid\n",
+ vgdev->has_virgl_3d ? '+' : '-',
+ vgdev->has_edid ? '+' : '-');
+
ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
if (ret) {
DRM_ERROR("failed to find virt queues\n");
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 69a3d310ff70..017a9e0fc3bb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -86,7 +86,7 @@ static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .vm_ops = &drm_gem_shmem_vm_ops,
+ .mmap = &drm_gem_shmem_mmap,
};
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index f4b7360282ce..390524143139 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -132,10 +132,10 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
virtio_gpu_cmd_transfer_to_host_2d
(vgdev, 0,
- cpu_to_le32(plane->state->src_w >> 16),
- cpu_to_le32(plane->state->src_h >> 16),
- cpu_to_le32(plane->state->src_x >> 16),
- cpu_to_le32(plane->state->src_y >> 16),
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
objs, NULL);
}
} else {
@@ -234,8 +234,8 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
virtio_gpu_cmd_transfer_to_host_2d
(vgdev, 0,
- cpu_to_le32(plane->state->crtc_w),
- cpu_to_le32(plane->state->crtc_h),
+ plane->state->crtc_w,
+ plane->state->crtc_h,
0, 0, objs, vgfb->fence);
dma_fence_wait(&vgfb->fence->f, true);
dma_fence_put(&vgfb->fence->f);
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 80176f379ad5..74ad3bc3ebe8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -549,8 +549,8 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
- __le32 width, __le32 height,
- __le32 x, __le32 y,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
@@ -571,10 +571,10 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
cmd_p->offset = cpu_to_le64(offset);
- cmd_p->r.width = width;
- cmd_p->r.height = height;
- cmd_p->r.x = x;
- cmd_p->r.y = y;
+ cmd_p->r.width = cpu_to_le32(width);
+ cmd_p->r.height = cpu_to_le32(height);
+ cmd_p->r.x = cpu_to_le32(x);
+ cmd_p->r.y = cpu_to_le32(y);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
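The virtio-gpu hunks above untangle the byte-order handling by converting exactly once: internal prototypes now take plain uint32_t, and cpu_to_le32() is applied only where the wire-format struct handed to the device is filled in. A minimal sketch of that convention; my_cmd and my_emit are illustrative names, not virtio-gpu API:

struct my_cmd {
	__le32 width;	/* wire format is always little-endian */
	__le32 height;
};

static void my_emit(struct my_cmd *cmd, uint32_t width, uint32_t height)
{
	/* The only place where CPU byte order meets the device. */
	cmd->width = cpu_to_le32(width);
	cmd->height = cpu_to_le32(height);
}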
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 80524a22412a..d1fe144aa289 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -11,13 +11,14 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <drm/drm_gem.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
-#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
@@ -103,6 +104,8 @@ static struct drm_driver vkms_driver = {
.gem_vm_ops = &vkms_gem_vm_ops,
.gem_free_object_unlocked = vkms_gem_free_object,
.get_vblank_timestamp = vkms_get_vblank_timestamp,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import_sg_table = vkms_prime_import_sg_table,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -157,6 +160,14 @@ static int __init vkms_init(void)
if (ret)
goto out_unregister;
+ ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev,
+ DMA_BIT_MASK(64));
+
+ if (ret) {
+ DRM_ERROR("Could not initialize DMA support\n");
+ goto out_fini;
+ }
+
vkms_device->drm.irq_enabled = true;
ret = drm_vblank_init(&vkms_device->drm, 1);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 5a95100fa18b..7d52e24564db 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -137,6 +137,12 @@ int vkms_gem_vmap(struct drm_gem_object *obj);
void vkms_gem_vunmap(struct drm_gem_object *obj);
+/* Prime */
+struct drm_gem_object *
+vkms_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg);
+
/* CRC Support */
const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
size_t *count);
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 6489bfe0a149..2e01186fb943 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0+
+#include <linux/dma-buf.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>
+#include <drm/drm_prime.h>
#include "vkms_drv.h"
@@ -218,3 +220,28 @@ out:
mutex_unlock(&vkms_obj->pages_lock);
return ret;
}
+
+struct drm_gem_object *
+vkms_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg)
+{
+ struct vkms_gem_object *obj;
+ int npages;
+
+ obj = __vkms_gem_create(dev, attach->dmabuf->size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
+ DRM_DEBUG_PRIME("Importing %d pages\n", npages);
+
+ obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!obj->pages) {
+ vkms_gem_free_object(&obj->gem);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
+ return &obj->gem;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index aad8d8140259..74016a08d118 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -566,7 +566,7 @@ static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
switch (ref_type) {
case TTM_REF_SYNCCPU_WRITE:
- ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+ atomic_dec(&user_bo->vbo.cpu_writers);
break;
default:
WARN_ONCE(true, "Undefined buffer object reference release.\n");
@@ -682,12 +682,12 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
struct ttm_object_file *tfile,
uint32_t flags)
{
+ bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
struct ttm_buffer_object *bo = &user_bo->vbo.base;
bool existed;
int ret;
if (flags & drm_vmw_synccpu_allow_cs) {
- bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
long lret;
lret = dma_resv_wait_timeout_rcu
@@ -700,15 +700,22 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
return 0;
}
- ret = ttm_bo_synccpu_write_grab
- (bo, !!(flags & drm_vmw_synccpu_dontblock));
+ ret = ttm_bo_reserve(bo, true, nonblock, NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_wait(bo, true, nonblock);
+ if (likely(ret == 0))
+ atomic_inc(&user_bo->vbo.cpu_writers);
+
+ ttm_bo_unreserve(bo);
if (unlikely(ret != 0))
return ret;
ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_SYNCCPU_WRITE, &existed, false);
if (ret != 0 || existed)
- ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+ atomic_dec(&user_bo->vbo.cpu_writers);
return ret;
}
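Taken together with the vmwgfx_drv.h and vmwgfx_validation.c hunks below, the open-coded grab above establishes a small contract: cpu_writers is raised only while the buffer is reserved and idle, may be dropped without reservation, and validation refuses any buffer with writers outstanding. Condensed into one sketch, with vbo being the struct vmw_buffer_object:

/* grab, under reservation: */
ret = ttm_bo_reserve(bo, true, nonblock, NULL);
if (ret)
	return ret;
ret = ttm_bo_wait(bo, true, nonblock);
if (ret == 0)
	atomic_inc(&vbo->cpu_writers);	/* only raised while reserved */
ttm_bo_unreserve(bo);

/* release, from any context: */
atomic_dec(&vbo->cpu_writers);

/* validation refuses buffers with CPU access in flight: */
if (atomic_read(&vbo->cpu_writers))
	return -EBUSY;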
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 29f60e027a38..b18842f73081 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -102,6 +102,8 @@ struct vmw_fpriv {
* @base: The TTM buffer object
* @res_list: List of resources using this buffer object as a backing MOB
* @pin_count: pin depth
+ * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
+ * increased. May be decreased without reservation.
* @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
* @map: Kmap object for semi-persistent mappings
* @res_prios: Eviction priority counts for attached resources
@@ -110,6 +112,7 @@ struct vmw_buffer_object {
struct ttm_buffer_object base;
struct list_head res_list;
s32 pin_count;
+ atomic_t cpu_writers;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
/* Protected by reservation */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 5581a7826b4c..6dfe36fb817c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -492,8 +492,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
val_buf->bo = &res->backup->base;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &val_list);
- ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
- true);
+ ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
goto out_no_reserve;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index f611b2290a1b..7bff3628fc54 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -521,6 +521,9 @@ int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
};
int ret;
+ if (atomic_read(&vbo->cpu_writers))
+ return -EBUSY;
+
if (vbo->pin_count > 0)
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index 0e063743dd86..71ce4b318850 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -170,7 +170,7 @@ vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
bool intr)
{
return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
- NULL, true);
+ NULL);
}
/**
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index ba1828acd8c9..4be49c1aef51 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -718,17 +718,9 @@ static int xen_drv_probe(struct xenbus_device *xb_dev,
struct device *dev = &xb_dev->dev;
int ret;
- /*
- * The device is not spawn from a device tree, so arch_setup_dma_ops
- * is not called, thus leaving the device with dummy DMA ops.
- * This makes the device return error on PRIME buffer import, which
- * is not correct: to fix this call of_dma_configure() with a NULL
- * node to set default DMA ops.
- */
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
- ret = of_dma_configure(dev, NULL, true);
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret < 0) {
- DRM_ERROR("Cannot setup DMA ops, ret %d", ret);
+ DRM_ERROR("Cannot setup DMA mask, ret %d", ret);
return ret;
}
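Both the vkms and xen hunks above settle on dma_coerce_mask_and_coherent() (declared in <linux/dma-mapping.h>), which forces the streaming and coherent DMA masks in a single call; it is the usual choice for virtual devices with no firmware-described DMA configuration. A minimal probe-time sketch with an assumed platform device:

static int my_probe(struct platform_device *pdev)
{
	int ret;

	/* Force a 64-bit mask before the first allocation or PRIME import. */
	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	return 0;
}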
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index 21ad1c359b61..ff506bc99414 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -270,11 +270,12 @@ static void display_update(struct drm_simple_display_pipe *pipe,
}
static enum drm_mode_status
-display_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
+display_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode)
{
struct xen_drm_front_drm_pipeline *pipeline =
- container_of(crtc, struct xen_drm_front_drm_pipeline,
- pipe.crtc);
+ container_of(pipe, struct xen_drm_front_drm_pipeline,
+ pipe);
if (mode->hdisplay != pipeline->width)
return MODE_ERROR;
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
index cf987a317a55..6dab94adf25e 100644
--- a/drivers/gpu/host1x/Kconfig
+++ b/drivers/gpu/host1x/Kconfig
@@ -2,7 +2,7 @@
config TEGRA_HOST1X
tristate "NVIDIA Tegra host1x driver"
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
- select IOMMU_IOVA if IOMMU_SUPPORT
+ select IOMMU_IOVA
help
Driver for the NVIDIA Tegra host1x hardware.
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 742aa9ff21b8..2c8559ff3481 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -445,7 +445,7 @@ static int host1x_device_add(struct host1x *host1x,
of_dma_configure(&device->dev, host1x->dev->of_node, true);
device->dev.dma_parms = &device->dma_parms;
- dma_set_max_seg_size(&device->dev, SZ_4M);
+ dma_set_max_seg_size(&device->dev, UINT_MAX);
err = host1x_device_parse_dt(device, driver);
if (err < 0) {
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index 48c84c48299c..e8d3fda91d8a 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -232,9 +232,9 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
*
* Must be called with the cdma lock held.
*/
-int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
- struct host1x_cdma *cdma,
- unsigned int needed)
+static int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
+ struct host1x_cdma *cdma,
+ unsigned int needed)
{
while (true) {
struct push_buffer *pb = &cdma->push_buffer;
diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c
index 1436295aa450..4cd212bb570d 100644
--- a/drivers/gpu/host1x/channel.c
+++ b/drivers/gpu/host1x/channel.c
@@ -115,14 +115,14 @@ static struct host1x_channel *acquire_unused_channel(struct host1x *host)
/**
* host1x_channel_request() - Allocate a channel
- * @device: Host1x unit this channel will be used to send commands to
+ * @client: Host1x client this channel will be used to send commands to
*
- * Allocates a new host1x channel for @device. May return NULL if CDMA
+ * Allocates a new host1x channel for @client. May return NULL if CDMA
* initialization fails.
*/
-struct host1x_channel *host1x_channel_request(struct device *dev)
+struct host1x_channel *host1x_channel_request(struct host1x_client *client)
{
- struct host1x *host = dev_get_drvdata(dev->parent);
+ struct host1x *host = dev_get_drvdata(client->dev->parent);
struct host1x_channel_list *chlist = &host->channel_list;
struct host1x_channel *channel;
int err;
@@ -133,7 +133,8 @@ struct host1x_channel *host1x_channel_request(struct device *dev)
kref_init(&channel->refcount);
mutex_init(&channel->submitlock);
- channel->dev = dev;
+ channel->client = client;
+ channel->dev = client->dev;
err = host1x_hw_channel_init(host, channel, channel->id);
if (err < 0)
@@ -148,7 +149,7 @@ struct host1x_channel *host1x_channel_request(struct device *dev)
fail:
clear_bit(channel->id, chlist->allocated_channels);
- dev_err(dev, "failed to initialize channel\n");
+ dev_err(client->dev, "failed to initialize channel\n");
return NULL;
}
diff --git a/drivers/gpu/host1x/channel.h b/drivers/gpu/host1x/channel.h
index 4fd694834f74..39044ff6c3aa 100644
--- a/drivers/gpu/host1x/channel.h
+++ b/drivers/gpu/host1x/channel.h
@@ -26,6 +26,7 @@ struct host1x_channel {
unsigned int id;
struct mutex submitlock;
void __iomem *regs;
+ struct host1x_client *client;
struct device *dev;
struct host1x_cdma cdma;
};
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 5a3f797240d4..a738ea55e407 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -18,10 +18,6 @@
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS
-#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
-#include <asm/dma-iommu.h>
-#endif
-
#include "bus.h"
#include "channel.h"
#include "debug.h"
@@ -77,6 +73,10 @@ static const struct host1x_info host1x01_info = {
.init = host1x01_init,
.sync_offset = 0x3000,
.dma_mask = DMA_BIT_MASK(32),
+ .has_wide_gather = false,
+ .has_hypervisor = false,
+ .num_sid_entries = 0,
+ .sid_table = NULL,
};
static const struct host1x_info host1x02_info = {
@@ -87,6 +87,10 @@ static const struct host1x_info host1x02_info = {
.init = host1x02_init,
.sync_offset = 0x3000,
.dma_mask = DMA_BIT_MASK(32),
+ .has_wide_gather = false,
+ .has_hypervisor = false,
+ .num_sid_entries = 0,
+ .sid_table = NULL,
};
static const struct host1x_info host1x04_info = {
@@ -97,6 +101,10 @@ static const struct host1x_info host1x04_info = {
.init = host1x04_init,
.sync_offset = 0x2100,
.dma_mask = DMA_BIT_MASK(34),
+ .has_wide_gather = false,
+ .has_hypervisor = false,
+ .num_sid_entries = 0,
+ .sid_table = NULL,
};
static const struct host1x_info host1x05_info = {
@@ -107,6 +115,10 @@ static const struct host1x_info host1x05_info = {
.init = host1x05_init,
.sync_offset = 0x2100,
.dma_mask = DMA_BIT_MASK(34),
+ .has_wide_gather = false,
+ .has_hypervisor = false,
+ .num_sid_entries = 0,
+ .sid_table = NULL,
};
static const struct host1x_sid_entry tegra186_sid_table[] = {
@@ -126,6 +138,7 @@ static const struct host1x_info host1x06_info = {
.init = host1x06_init,
.sync_offset = 0x0,
.dma_mask = DMA_BIT_MASK(40),
+ .has_wide_gather = true,
.has_hypervisor = true,
.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
.sid_table = tegra186_sid_table,
@@ -148,6 +161,7 @@ static const struct host1x_info host1x07_info = {
.init = host1x07_init,
.sync_offset = 0x0,
.dma_mask = DMA_BIT_MASK(40),
+ .has_wide_gather = true,
.has_hypervisor = true,
.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
.sid_table = tegra194_sid_table,
@@ -178,6 +192,117 @@ static void host1x_setup_sid_table(struct host1x *host)
}
}
+static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
+ int err;
+
+ /*
+ * If the host1x firewall is enabled, there's no need to enable IOMMU
+ * support. Similarly, if host1x is already attached to an IOMMU (via
+ * the DMA API), don't try to attach again.
+ */
+ if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) || domain)
+ return domain;
+
+ host->group = iommu_group_get(host->dev);
+ if (host->group) {
+ struct iommu_domain_geometry *geometry;
+ dma_addr_t start, end;
+ unsigned long order;
+
+ err = iova_cache_get();
+ if (err < 0)
+ goto put_group;
+
+ host->domain = iommu_domain_alloc(&platform_bus_type);
+ if (!host->domain) {
+ err = -ENOMEM;
+ goto put_cache;
+ }
+
+ err = iommu_attach_group(host->domain, host->group);
+ if (err) {
+ if (err == -ENODEV)
+ err = 0;
+
+ goto free_domain;
+ }
+
+ geometry = &host->domain->geometry;
+ start = geometry->aperture_start & host->info->dma_mask;
+ end = geometry->aperture_end & host->info->dma_mask;
+
+ order = __ffs(host->domain->pgsize_bitmap);
+ init_iova_domain(&host->iova, 1UL << order, start >> order);
+ host->iova_end = end;
+
+ domain = host->domain;
+ }
+
+ return domain;
+
+free_domain:
+ iommu_domain_free(host->domain);
+ host->domain = NULL;
+put_cache:
+ iova_cache_put();
+put_group:
+ iommu_group_put(host->group);
+ host->group = NULL;
+
+ return ERR_PTR(err);
+}
+
+static int host1x_iommu_init(struct host1x *host)
+{
+ u64 mask = host->info->dma_mask;
+ struct iommu_domain *domain;
+ int err;
+
+ domain = host1x_iommu_attach(host);
+ if (IS_ERR(domain)) {
+ err = PTR_ERR(domain);
+ dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
+ return err;
+ }
+
+ /*
+ * If we're not behind an IOMMU, make sure we don't get push buffers
+ * that are allocated outside of the range addressable by the GATHER
+ * opcode.
+ *
+ * Newer generations of Tegra (Tegra186 and later) support a wide
+ * variant of the GATHER opcode that allows addressing more bits.
+ */
+ if (!domain && !host->info->has_wide_gather)
+ mask = DMA_BIT_MASK(32);
+
+ err = dma_coerce_mask_and_coherent(host->dev, mask);
+ if (err < 0) {
+ dev_err(host->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void host1x_iommu_exit(struct host1x *host)
+{
+ if (host->domain) {
+ put_iova_domain(&host->iova);
+ iommu_detach_group(host->domain, host->group);
+
+ iommu_domain_free(host->domain);
+ host->domain = NULL;
+
+ iova_cache_put();
+
+ iommu_group_put(host->group);
+ host->group = NULL;
+ }
+}
+
static int host1x_probe(struct platform_device *pdev)
{
struct host1x *host;
@@ -237,7 +362,8 @@ static int host1x_probe(struct platform_device *pdev)
return PTR_ERR(host->hv_regs);
}
- dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
+ host->dev->dma_parms = &host->dma_parms;
+ dma_set_max_seg_size(host->dev, UINT_MAX);
if (host->info->init) {
err = host->info->init(host);
@@ -261,87 +387,42 @@ static int host1x_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get reset: %d\n", err);
return err;
}
-#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
- if (host->dev->archdata.mapping) {
- struct dma_iommu_mapping *mapping =
- to_dma_iommu_mapping(host->dev);
- arm_iommu_detach_device(host->dev);
- arm_iommu_release_mapping(mapping);
- }
-#endif
- if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
- goto skip_iommu;
-
- host->group = iommu_group_get(&pdev->dev);
- if (host->group) {
- struct iommu_domain_geometry *geometry;
- u64 mask = dma_get_mask(host->dev);
- dma_addr_t start, end;
- unsigned long order;
-
- err = iova_cache_get();
- if (err < 0)
- goto put_group;
-
- host->domain = iommu_domain_alloc(&platform_bus_type);
- if (!host->domain) {
- err = -ENOMEM;
- goto put_cache;
- }
- err = iommu_attach_group(host->domain, host->group);
- if (err) {
- if (err == -ENODEV) {
- iommu_domain_free(host->domain);
- host->domain = NULL;
- iova_cache_put();
- iommu_group_put(host->group);
- host->group = NULL;
- goto skip_iommu;
- }
-
- goto fail_free_domain;
- }
-
- geometry = &host->domain->geometry;
- start = geometry->aperture_start & mask;
- end = geometry->aperture_end & mask;
-
- order = __ffs(host->domain->pgsize_bitmap);
- init_iova_domain(&host->iova, 1UL << order, start >> order);
- host->iova_end = end;
+ err = host1x_iommu_init(host);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
+ return err;
}
-skip_iommu:
err = host1x_channel_list_init(&host->channel_list,
host->info->nb_channels);
if (err) {
dev_err(&pdev->dev, "failed to initialize channel list\n");
- goto fail_detach_device;
+ goto iommu_exit;
}
err = clk_prepare_enable(host->clk);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable clock\n");
- goto fail_free_channels;
+ goto free_channels;
}
err = reset_control_deassert(host->rst);
if (err < 0) {
dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
- goto fail_unprepare_disable;
+ goto unprepare_disable;
}
err = host1x_syncpt_init(host);
if (err) {
dev_err(&pdev->dev, "failed to initialize syncpts\n");
- goto fail_reset_assert;
+ goto reset_assert;
}
err = host1x_intr_init(host, syncpt_irq);
if (err) {
dev_err(&pdev->dev, "failed to initialize interrupts\n");
- goto fail_deinit_syncpt;
+ goto deinit_syncpt;
}
host1x_debug_init(host);
@@ -351,33 +432,22 @@ skip_iommu:
err = host1x_register(host);
if (err < 0)
- goto fail_deinit_intr;
+ goto deinit_intr;
return 0;
-fail_deinit_intr:
+deinit_intr:
host1x_intr_deinit(host);
-fail_deinit_syncpt:
+deinit_syncpt:
host1x_syncpt_deinit(host);
-fail_reset_assert:
+reset_assert:
reset_control_assert(host->rst);
-fail_unprepare_disable:
+unprepare_disable:
clk_disable_unprepare(host->clk);
-fail_free_channels:
+free_channels:
host1x_channel_list_free(&host->channel_list);
-fail_detach_device:
- if (host->group && host->domain) {
- put_iova_domain(&host->iova);
- iommu_detach_group(host->domain, host->group);
- }
-fail_free_domain:
- if (host->domain)
- iommu_domain_free(host->domain);
-put_cache:
- if (host->group)
- iova_cache_put();
-put_group:
- iommu_group_put(host->group);
+iommu_exit:
+ host1x_iommu_exit(host);
return err;
}
@@ -387,18 +457,12 @@ static int host1x_remove(struct platform_device *pdev)
struct host1x *host = platform_get_drvdata(pdev);
host1x_unregister(host);
+ host1x_debug_deinit(host);
host1x_intr_deinit(host);
host1x_syncpt_deinit(host);
reset_control_assert(host->rst);
clk_disable_unprepare(host->clk);
-
- if (host->domain) {
- put_iova_domain(&host->iova);
- iommu_detach_group(host->domain, host->group);
- iommu_domain_free(host->domain);
- iova_cache_put();
- iommu_group_put(host->group);
- }
+ host1x_iommu_exit(host);
return 0;
}
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index ff56f5e23a02..f781a9b0f39d 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -97,6 +97,7 @@ struct host1x_info {
int (*init)(struct host1x *host1x); /* initialize per SoC ops */
unsigned int sync_offset; /* offset of syncpoint registers */
u64 dma_mask; /* mask of addressable memory */
+ bool has_wide_gather; /* supports GATHER_W opcode */
bool has_hypervisor; /* has hypervisor registers */
unsigned int num_sid_entries;
const struct host1x_sid_entry *sid_table;
@@ -140,6 +141,8 @@ struct host1x {
struct list_head devices;
struct list_head list;
+
+ struct device_dma_parameters dma_parms;
};
void host1x_hypervisor_writel(struct host1x *host1x, u32 r, u32 v);
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index 26f3c741d085..9245add23b5d 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -105,7 +105,6 @@ static void action_submit_complete(struct host1x_waitlist *waiter)
/* Add nr_completed to trace */
trace_host1x_channel_submit_complete(dev_name(channel->dev),
waiter->count, waiter->thresh);
-
}
static void action_wakeup(struct host1x_waitlist *waiter)
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index eaa5c3352c13..25ca54de8fc5 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -99,6 +99,8 @@ EXPORT_SYMBOL(host1x_job_add_gather);
static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
{
+ struct host1x_client *client = job->client;
+ struct device *dev = client->dev;
unsigned int i;
int err;
@@ -106,8 +108,8 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
for (i = 0; i < job->num_relocs; i++) {
struct host1x_reloc *reloc = &job->relocs[i];
+ dma_addr_t phys_addr, *phys;
struct sg_table *sgt;
- dma_addr_t phys_addr;
reloc->target.bo = host1x_bo_get(reloc->target.bo);
if (!reloc->target.bo) {
@@ -115,7 +117,50 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);
+ if (client->group)
+ phys = &phys_addr;
+ else
+ phys = NULL;
+
+ sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto unpin;
+ }
+
+ if (sgt) {
+ unsigned long mask = HOST1X_RELOC_READ |
+ HOST1X_RELOC_WRITE;
+ enum dma_data_direction dir;
+
+ switch (reloc->flags & mask) {
+ case HOST1X_RELOC_READ:
+ dir = DMA_TO_DEVICE;
+ break;
+
+ case HOST1X_RELOC_WRITE:
+ dir = DMA_FROM_DEVICE;
+ break;
+
+ case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
+ dir = DMA_BIDIRECTIONAL;
+ break;
+
+ default:
+ err = -EINVAL;
+ goto unpin;
+ }
+
+ err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
+ if (!err) {
+ err = -ENOMEM;
+ goto unpin;
+ }
+
+ job->unpins[job->num_unpins].dev = dev;
+ job->unpins[job->num_unpins].dir = dir;
+ phys_addr = sg_dma_address(sgt->sgl);
+ }
job->addr_phys[job->num_unpins] = phys_addr;
job->unpins[job->num_unpins].bo = reloc->target.bo;
@@ -139,7 +184,11 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- phys_addr = host1x_bo_pin(g->bo, &sgt);
+ sgt = host1x_bo_pin(host->dev, g->bo, NULL);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto unpin;
+ }
if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
for_each_sg(sgt->sgl, sg, sgt->nents, j)
@@ -163,15 +212,24 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- job->addr_phys[job->num_unpins] =
- iova_dma_addr(&host->iova, alloc);
job->unpins[job->num_unpins].size = gather_size;
+ phys_addr = iova_dma_addr(&host->iova, alloc);
} else {
- job->addr_phys[job->num_unpins] = phys_addr;
+ err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE);
+ if (!err) {
+ err = -ENOMEM;
+ goto unpin;
+ }
+
+ job->unpins[job->num_unpins].dev = host->dev;
+ phys_addr = sg_dma_address(sgt->sgl);
}
- job->gather_addr_phys[i] = job->addr_phys[job->num_unpins];
+ job->addr_phys[job->num_unpins] = phys_addr;
+ job->gather_addr_phys[i] = phys_addr;
+ job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
job->unpins[job->num_unpins].bo = g->bo;
job->unpins[job->num_unpins].sgt = sgt;
job->num_unpins++;
@@ -436,7 +494,8 @@ out:
return err;
}
-static inline int copy_gathers(struct host1x_job *job, struct device *dev)
+static inline int copy_gathers(struct device *host, struct host1x_job *job,
+ struct device *dev)
{
struct host1x_firewall fw;
size_t size = 0;
@@ -459,12 +518,12 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
* Try a non-blocking allocation from a higher-priority pool first,
* as waiting for the allocation here is a major performance hit.
*/
- job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy,
+ job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
GFP_NOWAIT);
/* the higher priority allocation failed, try the generic-blocking */
if (!job->gather_copy_mapped)
- job->gather_copy_mapped = dma_alloc_wc(dev, size,
+ job->gather_copy_mapped = dma_alloc_wc(host, size,
&job->gather_copy,
GFP_KERNEL);
if (!job->gather_copy_mapped)
@@ -512,7 +571,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
goto out;
if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
- err = copy_gathers(job, dev);
+ err = copy_gathers(host->dev, job, dev);
if (err)
goto out;
}
@@ -557,6 +616,8 @@ void host1x_job_unpin(struct host1x_job *job)
for (i = 0; i < job->num_unpins; i++) {
struct host1x_job_unpin_data *unpin = &job->unpins[i];
+ struct device *dev = unpin->dev ?: host->dev;
+ struct sg_table *sgt = unpin->sgt;
if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
unpin->size && host->domain) {
@@ -566,14 +627,18 @@ void host1x_job_unpin(struct host1x_job *job)
iova_pfn(&host->iova, job->addr_phys[i]));
}
- host1x_bo_unpin(unpin->bo, unpin->sgt);
+ if (unpin->dev && sgt)
+ dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents,
+ unpin->dir);
+
+ host1x_bo_unpin(dev, unpin->bo, sgt);
host1x_bo_put(unpin->bo);
}
job->num_unpins = 0;
if (job->gather_copy_size)
- dma_free_wc(job->channel->dev, job->gather_copy_size,
+ dma_free_wc(host->dev, job->gather_copy_size,
job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);
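The pin_job() changes above derive a DMA direction from the relocation flags before calling dma_map_sg(). The same mapping, pulled out into a hypothetical standalone helper for clarity (the flags come from <linux/host1x.h>, the enum from <linux/dma-direction.h>):

static enum dma_data_direction reloc_dir(unsigned long flags)
{
	switch (flags & (HOST1X_RELOC_READ | HOST1X_RELOC_WRITE)) {
	case HOST1X_RELOC_READ:
		return DMA_TO_DEVICE;	/* device only reads the buffer */
	case HOST1X_RELOC_WRITE:
		return DMA_FROM_DEVICE;	/* device only writes the buffer */
	case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
		return DMA_BIDIRECTIONAL;
	default:
		return DMA_NONE;	/* invalid: pin_job() returns -EINVAL */
	}
}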
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
index 62b8805e6b35..94bc2e4ae241 100644
--- a/drivers/gpu/host1x/job.h
+++ b/drivers/gpu/host1x/job.h
@@ -8,6 +8,8 @@
#ifndef __HOST1X_JOB_H
#define __HOST1X_JOB_H
+#include <linux/dma-direction.h>
+
struct host1x_job_gather {
unsigned int words;
dma_addr_t base;
@@ -19,7 +21,9 @@ struct host1x_job_gather {
struct host1x_job_unpin_data {
struct host1x_bo *bo;
struct sg_table *sgt;
+ struct device *dev;
size_t size;
+ enum dma_data_direction dir;
};
/*
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index cc5b09b87ab0..79a28fc91521 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -314,60 +314,24 @@ static void mousevsc_on_receive(struct hv_device *device,
static void mousevsc_on_channel_callback(void *context)
{
- const int packet_size = 0x100;
- int ret;
struct hv_device *device = context;
- u32 bytes_recvd;
- u64 req_id;
struct vmpacket_descriptor *desc;
- unsigned char *buffer;
- int bufferlen = packet_size;
-
- buffer = kmalloc(bufferlen, GFP_ATOMIC);
- if (!buffer)
- return;
-
- do {
- ret = vmbus_recvpacket_raw(device->channel, buffer,
- bufferlen, &bytes_recvd, &req_id);
-
- switch (ret) {
- case 0:
- if (bytes_recvd <= 0) {
- kfree(buffer);
- return;
- }
- desc = (struct vmpacket_descriptor *)buffer;
-
- switch (desc->type) {
- case VM_PKT_COMP:
- break;
-
- case VM_PKT_DATA_INBAND:
- mousevsc_on_receive(device, desc);
- break;
-
- default:
- pr_err("unhandled packet type %d, tid %llx len %d\n",
- desc->type, req_id, bytes_recvd);
- break;
- }
+ foreach_vmbus_pkt(desc, device->channel) {
+ switch (desc->type) {
+ case VM_PKT_COMP:
break;
- case -ENOBUFS:
- kfree(buffer);
- /* Handle large packet */
- bufferlen = bytes_recvd;
- buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
-
- if (!buffer)
- return;
+ case VM_PKT_DATA_INBAND:
+ mousevsc_on_receive(device, desc);
+ break;
+ default:
+ pr_err("Unhandled packet type %d, tid %llx len %d\n",
+ desc->type, desc->trans_id, desc->len8 * 8);
break;
}
- } while (1);
-
+ }
}
static int mousevsc_connect_to_vsp(struct hv_device *device)
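The rewrite above replaces the manual vmbus_recvpacket_raw() buffer management (allocate, retry on -ENOBUFS, free) with the foreach_vmbus_pkt() iterator from <linux/hyperv.h>, which walks descriptors in place on the channel ring. Its definition is roughly:

#define foreach_vmbus_pkt(pkt, channel) \
	for (pkt = hv_pkt_iter_first(channel); pkt; \
	     pkt = hv_pkt_iter_next(channel, pkt))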
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 391f0b225c9a..53a60c81e220 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -912,6 +912,7 @@ static void vmbus_shutdown(struct device *child_device)
drv->shutdown(dev);
}
+#ifdef CONFIG_PM_SLEEP
/*
* vmbus_suspend - Suspend a vmbus device
*/
@@ -949,6 +950,7 @@ static int vmbus_resume(struct device *child_device)
return drv->resume(dev);
}
+#endif /* CONFIG_PM_SLEEP */
/*
* vmbus_device_release - Final callback release of the vmbus child device
@@ -1070,6 +1072,7 @@ msg_handled:
vmbus_signal_eom(msg, message_type);
}
+#ifdef CONFIG_PM_SLEEP
/*
* Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
* hibernation, because hv_sock connections cannot persist across hibernation.
@@ -1105,6 +1108,7 @@ static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
vmbus_connection.work_queue,
&ctx->work);
}
+#endif /* CONFIG_PM_SLEEP */
/*
* Direct callback for channels using other deferred processing
@@ -2125,6 +2129,7 @@ acpi_walk_err:
return ret_val;
}
+#ifdef CONFIG_PM_SLEEP
static int vmbus_bus_suspend(struct device *dev)
{
struct vmbus_channel *channel, *sc;
@@ -2247,6 +2252,7 @@ static int vmbus_bus_resume(struct device *dev)
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
static const struct acpi_device_id vmbus_acpi_device_ids[] = {
{"VMBUS", 0},
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 95b447cfa24c..b26419dbe840 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -99,6 +99,8 @@ struct nct7904_data {
u8 enable_dts;
u8 has_dts;
u8 temp_mode; /* 0: TR mode, 1: TD mode */
+ u8 fan_alarm[2];
+ u8 vsen_alarm[3];
};
/* Access functions */
@@ -214,7 +216,15 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
SMI_STS5_REG + (channel >> 3));
if (ret < 0)
return ret;
- *val = (ret >> (channel & 0x07)) & 1;
+ if (!data->fan_alarm[channel >> 3])
+ data->fan_alarm[channel >> 3] = ret & 0xff;
+ else
+ /* OR in any newly raised alarm bits */
+ data->fan_alarm[channel >> 3] |= (ret & 0xff);
+ *val = (data->fan_alarm[channel >> 3] >> (channel & 0x07)) & 1;
+ /* Clear the alarm bit after it has been reported */
+ if (*val)
+ data->fan_alarm[channel >> 3] ^= 1 << (channel & 0x07);
return 0;
default:
return -EOPNOTSUPP;
@@ -298,7 +308,15 @@ static int nct7904_read_in(struct device *dev, u32 attr, int channel,
SMI_STS1_REG + (index >> 3));
if (ret < 0)
return ret;
- *val = (ret >> (index & 0x07)) & 1;
+ if (!data->vsen_alarm[index >> 3])
+ data->vsen_alarm[index >> 3] = ret & 0xff;
+ else
+ /* OR in any newly raised alarm bits */
+ data->vsen_alarm[index >> 3] |= (ret & 0xff);
+ *val = (data->vsen_alarm[index >> 3] >> (index & 0x07)) & 1;
+ /* Clear the alarm bit after it has been reported */
+ if (*val)
+ data->vsen_alarm[index >> 3] ^= 1 << (index & 0x07);
return 0;
default:
return -EOPNOTSUPP;
@@ -915,12 +933,15 @@ static int nct7904_probe(struct i2c_client *client,
data->temp_mode = 0;
for (i = 0; i < 4; i++) {
- val = (ret & (0x03 << i)) >> (i * 2);
+ val = (ret >> (i * 2)) & 0x03;
bit = (1 << i);
- if (val == 0)
+ if (val == 0) {
data->tcpu_mask &= ~bit;
- else if (val == 0x1 || val == 0x2)
- data->temp_mode |= bit;
+ } else {
+ if (val == 0x1 || val == 0x2)
+ data->temp_mode |= bit;
+ data->vsen_mask &= ~(0x06 << (i * 2));
+ }
}
/* PECI */
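The nct7904 hunks above add a software latch because a single SMI status read returns, and seemingly clears, the alarm bits for a whole bank of channels at once; without the latch, querying one channel would silently discard pending alarms on its neighbours. The pattern in isolation, with latch, status and bit as illustrative names:

latch |= status;		/* keep every alarm bit seen so far */
*val = (latch >> bit) & 1;	/* report only the requested channel */
if (*val)
	latch ^= BIT(bit);	/* consume the bit once it is reported */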
diff --git a/drivers/iio/accel/adxl372.c b/drivers/iio/accel/adxl372.c
index 055227cb3d43..67b8817995c0 100644
--- a/drivers/iio/accel/adxl372.c
+++ b/drivers/iio/accel/adxl372.c
@@ -474,12 +474,17 @@ static int adxl372_configure_fifo(struct adxl372_state *st)
if (ret < 0)
return ret;
- fifo_samples = st->watermark & 0xFF;
+ /*
+ * watermark stores the number of sets; we need to write the FIFO
+ * registers with the number of samples
+ */
+ fifo_samples = (st->watermark * st->fifo_set_size);
fifo_ctl = ADXL372_FIFO_CTL_FORMAT_MODE(st->fifo_format) |
ADXL372_FIFO_CTL_MODE_MODE(st->fifo_mode) |
- ADXL372_FIFO_CTL_SAMPLES_MODE(st->watermark);
+ ADXL372_FIFO_CTL_SAMPLES_MODE(fifo_samples);
- ret = regmap_write(st->regmap, ADXL372_FIFO_SAMPLES, fifo_samples);
+ ret = regmap_write(st->regmap,
+ ADXL372_FIFO_SAMPLES, fifo_samples & 0xFF);
if (ret < 0)
return ret;
@@ -548,8 +553,7 @@ static irqreturn_t adxl372_trigger_handler(int irq, void *p)
goto err;
/* Each sample is 2 bytes */
- for (i = 0; i < fifo_entries * sizeof(u16);
- i += st->fifo_set_size * sizeof(u16))
+ for (i = 0; i < fifo_entries; i += st->fifo_set_size)
iio_push_to_buffers(indio_dev, &st->fifo_buf[i]);
}
err:
@@ -571,6 +575,14 @@ static int adxl372_setup(struct adxl372_state *st)
return -ENODEV;
}
+ /*
+ * Perform a software reset to make sure the device is in a consistent
+ * state after start-up.
+ */
+ ret = regmap_write(st->regmap, ADXL372_RESET, ADXL372_RESET_CODE);
+ if (ret < 0)
+ return ret;
+
ret = adxl372_set_op_mode(st, ADXL372_STANDBY);
if (ret < 0)
return ret;
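A worked example of the adxl372 watermark fix above, using illustrative numbers: with x, y and z enabled the set size is 3, so a user watermark of 100 sample sets must be programmed as 300 samples, not 100.

/*
 * fifo_samples = watermark * fifo_set_size = 100 * 3 = 300 = 0x12C
 *
 * Only the low byte (0x2C) fits in the ADXL372_FIFO_SAMPLES register;
 * the high bit travels in the SAMPLES field of FIFO_CTL via
 * ADXL372_FIFO_CTL_SAMPLES_MODE(fifo_samples).
 */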
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index cf6c0e3a83d3..121b4e89f038 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -117,7 +117,7 @@
#define BMC150_ACCEL_SLEEP_1_SEC 0x0F
#define BMC150_ACCEL_REG_TEMP 0x08
-#define BMC150_ACCEL_TEMP_CENTER_VAL 24
+#define BMC150_ACCEL_TEMP_CENTER_VAL 23
#define BMC150_ACCEL_AXIS_TO_REG(axis) (BMC150_ACCEL_REG_XOUT_L + (axis * 2))
#define BMC150_AUTO_SUSPEND_DELAY_MS 2000
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 5a3ca5904ded..f658012baad8 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -810,10 +810,10 @@ static int ad799x_probe(struct i2c_client *client,
ret = ad799x_write_config(st, st->chip_config->default_config);
if (ret < 0)
- goto error_disable_reg;
+ goto error_disable_vref;
ret = ad799x_read_config(st);
if (ret < 0)
- goto error_disable_reg;
+ goto error_disable_vref;
st->config = ret;
ret = iio_triggered_buffer_setup(indio_dev, NULL,
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index adc9cf7a075d..8ea2aed6d6f5 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -7,6 +7,7 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
+#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
@@ -25,6 +26,11 @@
#define AXP288_ADC_EN_MASK 0xF0
#define AXP288_ADC_TS_ENABLE 0x01
+#define AXP288_ADC_TS_BIAS_MASK GENMASK(5, 4)
+#define AXP288_ADC_TS_BIAS_20UA (0 << 4)
+#define AXP288_ADC_TS_BIAS_40UA (1 << 4)
+#define AXP288_ADC_TS_BIAS_60UA (2 << 4)
+#define AXP288_ADC_TS_BIAS_80UA (3 << 4)
#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0)
#define AXP288_ADC_TS_CURRENT_OFF (0 << 0)
#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0)
@@ -177,10 +183,36 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
return ret;
}
+/*
+ * We rely on the machine's firmware to correctly set up the TS pin bias current
+ * at boot. This lists systems with broken firmware where we must set it ourselves.
+ */
+static const struct dmi_system_id axp288_adc_ts_bias_override[] = {
+ {
+ /* Lenovo Ideapad 100S (11 inch) */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 100S-11IBY"),
+ },
+ .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA,
+ },
+ {}
+};
+
static int axp288_adc_initialize(struct axp288_adc_info *info)
{
+ const struct dmi_system_id *bias_override;
int ret, adc_enable_val;
+ bias_override = dmi_first_match(axp288_adc_ts_bias_override);
+ if (bias_override) {
+ ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
+ AXP288_ADC_TS_BIAS_MASK,
+ (uintptr_t)bias_override->driver_data);
+ if (ret)
+ return ret;
+ }
+
/*
* Determine if the TS pin is enabled and set the TS current-source
* accordingly.
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index 88c7fe15003b..62e6c8badd22 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -100,14 +100,14 @@ struct hx711_data {
static int hx711_cycle(struct hx711_data *hx711_data)
{
- int val;
+ unsigned long flags;
/*
* if preempted for more than 60 us while PD_SCK is high:
* the hx711 goes into reset
* ==> the measurement is invalid
*/
- preempt_disable();
+ local_irq_save(flags);
gpiod_set_value(hx711_data->gpiod_pd_sck, 1);
/*
@@ -117,7 +117,6 @@ static int hx711_cycle(struct hx711_data *hx711_data)
*/
ndelay(hx711_data->data_ready_delay_ns);
- val = gpiod_get_value(hx711_data->gpiod_dout);
/*
* here we are not waiting for 0.2 us as suggested by the datasheet,
* because the oscilloscope showed in a test scenario
* at least 1.15 us for PD_SCK high (T3 in datasheet)
@@ -125,7 +124,7 @@ static int hx711_cycle(struct hx711_data *hx711_data)
* and 0.56 us for PD_SCK low on TI Sitara with 800 MHz
*/
gpiod_set_value(hx711_data->gpiod_pd_sck, 0);
- preempt_enable();
+ local_irq_restore(flags);
/*
* make it a square wave for addressing cases with capacitance on
@@ -133,7 +132,8 @@ static int hx711_cycle(struct hx711_data *hx711_data)
*/
ndelay(hx711_data->data_ready_delay_ns);
- return val;
+ /* sample as late as possible */
+ return gpiod_get_value(hx711_data->gpiod_dout);
}
static int hx711_read(struct hx711_data *hx711_data)
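The hx711 change above is about latency rather than locking: preempt_disable() still lets hard interrupts stretch the PD_SCK high phase past the 60 us threshold that resets the chip, whereas local_irq_save() keeps the pulse tight. The generic shape of such a timing-critical bit-bang section, with sck and high_ns as illustrative names:

unsigned long flags;

local_irq_save(flags);		/* nothing may stretch the pulse now */
gpiod_set_value(sck, 1);	/* critical phase: must stay under 60 us */
ndelay(high_ns);
gpiod_set_value(sck, 0);
local_irq_restore(flags);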
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 7b28d045d271..7b27306330a3 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -1219,6 +1219,11 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
+ priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+ priv->param->regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (!irq)
return -EINVAL;
@@ -1228,11 +1233,6 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
if (ret)
return ret;
- priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
- priv->param->regmap_config);
- if (IS_ERR(priv->regmap))
- return PTR_ERR(priv->regmap);
-
priv->clkin = devm_clk_get(&pdev->dev, "clkin");
if (IS_ERR(priv->clkin)) {
dev_err(&pdev->dev, "failed to get clkin\n");
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 9b85fefc0a96..93a096a91f8c 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -24,33 +24,6 @@
#include "stm32-adc-core.h"
-/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
-#define STM32F4_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00)
-#define STM32F4_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x04)
-
-/* STM32F4_ADC_CSR - bit fields */
-#define STM32F4_EOC3 BIT(17)
-#define STM32F4_EOC2 BIT(9)
-#define STM32F4_EOC1 BIT(1)
-
-/* STM32F4_ADC_CCR - bit fields */
-#define STM32F4_ADC_ADCPRE_SHIFT 16
-#define STM32F4_ADC_ADCPRE_MASK GENMASK(17, 16)
-
-/* STM32H7 - common registers for all ADC instances */
-#define STM32H7_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00)
-#define STM32H7_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x08)
-
-/* STM32H7_ADC_CSR - bit fields */
-#define STM32H7_EOC_SLV BIT(18)
-#define STM32H7_EOC_MST BIT(2)
-
-/* STM32H7_ADC_CCR - bit fields */
-#define STM32H7_PRESC_SHIFT 18
-#define STM32H7_PRESC_MASK GENMASK(21, 18)
-#define STM32H7_CKMODE_SHIFT 16
-#define STM32H7_CKMODE_MASK GENMASK(17, 16)
-
#define STM32_ADC_CORE_SLEEP_DELAY_MS 2000
/* SYSCFG registers */
@@ -71,6 +44,8 @@
* @eoc1: adc1 end of conversion flag in @csr
* @eoc2: adc2 end of conversion flag in @csr
* @eoc3: adc3 end of conversion flag in @csr
+ * @ier: interrupt enable register offset for each adc
+ * @eocie_msk: end of conversion interrupt enable mask in @ier
*/
struct stm32_adc_common_regs {
u32 csr;
@@ -78,6 +53,8 @@ struct stm32_adc_common_regs {
u32 eoc1_msk;
u32 eoc2_msk;
u32 eoc3_msk;
+ u32 ier;
+ u32 eocie_msk;
};
struct stm32_adc_priv;
@@ -303,6 +280,8 @@ static const struct stm32_adc_common_regs stm32f4_adc_common_regs = {
.eoc1_msk = STM32F4_EOC1,
.eoc2_msk = STM32F4_EOC2,
.eoc3_msk = STM32F4_EOC3,
+ .ier = STM32F4_ADC_CR1,
+ .eocie_msk = STM32F4_EOCIE,
};
/* STM32H7 common registers definitions */
@@ -311,8 +290,24 @@ static const struct stm32_adc_common_regs stm32h7_adc_common_regs = {
.ccr = STM32H7_ADC_CCR,
.eoc1_msk = STM32H7_EOC_MST,
.eoc2_msk = STM32H7_EOC_SLV,
+ .ier = STM32H7_ADC_IER,
+ .eocie_msk = STM32H7_EOCIE,
+};
+
+static const unsigned int stm32_adc_offset[STM32_ADC_MAX_ADCS] = {
+ 0, STM32_ADC_OFFSET, STM32_ADC_OFFSET * 2,
};
+static unsigned int stm32_adc_eoc_enabled(struct stm32_adc_priv *priv,
+ unsigned int adc)
+{
+ u32 ier, offset = stm32_adc_offset[adc];
+
+ ier = readl_relaxed(priv->common.base + offset + priv->cfg->regs->ier);
+
+ return ier & priv->cfg->regs->eocie_msk;
+}
+
/* ADC common interrupt for all instances */
static void stm32_adc_irq_handler(struct irq_desc *desc)
{
@@ -323,13 +318,28 @@ static void stm32_adc_irq_handler(struct irq_desc *desc)
chained_irq_enter(chip, desc);
status = readl_relaxed(priv->common.base + priv->cfg->regs->csr);
- if (status & priv->cfg->regs->eoc1_msk)
+ /*
+ * End of conversion may be handled by using IRQ or DMA. There may be a
+ * race here when two conversions complete at the same time on several
+ * ADCs. EOC may be read 'set' for several ADCs, with:
+ * - an ADC configured to use DMA (EOC triggers the DMA request, and
+ * is then automatically cleared by DR read in hardware)
+ * - an ADC configured to use IRQs (EOCIE bit is set. The handler must
+ * be called in this case)
+ * So both the EOC status bit in CSR and the EOCIE control bit must be
+ * checked before invoking the interrupt handler (i.e. call the ISR only
+ * for IRQ-enabled ADCs).
+ */
+ if (status & priv->cfg->regs->eoc1_msk &&
+ stm32_adc_eoc_enabled(priv, 0))
generic_handle_irq(irq_find_mapping(priv->domain, 0));
- if (status & priv->cfg->regs->eoc2_msk)
+ if (status & priv->cfg->regs->eoc2_msk &&
+ stm32_adc_eoc_enabled(priv, 1))
generic_handle_irq(irq_find_mapping(priv->domain, 1));
- if (status & priv->cfg->regs->eoc3_msk)
+ if (status & priv->cfg->regs->eoc3_msk &&
+ stm32_adc_eoc_enabled(priv, 2))
generic_handle_irq(irq_find_mapping(priv->domain, 2));
chained_irq_exit(chip, desc);
diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h
index 8af507b3f32d..2579d514c2a3 100644
--- a/drivers/iio/adc/stm32-adc-core.h
+++ b/drivers/iio/adc/stm32-adc-core.h
@@ -25,8 +25,145 @@
* --------------------------------------------------------
*/
#define STM32_ADC_MAX_ADCS 3
+#define STM32_ADC_OFFSET 0x100
#define STM32_ADCX_COMN_OFFSET 0x300
+/* STM32F4 - Registers for each ADC instance */
+#define STM32F4_ADC_SR 0x00
+#define STM32F4_ADC_CR1 0x04
+#define STM32F4_ADC_CR2 0x08
+#define STM32F4_ADC_SMPR1 0x0C
+#define STM32F4_ADC_SMPR2 0x10
+#define STM32F4_ADC_HTR 0x24
+#define STM32F4_ADC_LTR 0x28
+#define STM32F4_ADC_SQR1 0x2C
+#define STM32F4_ADC_SQR2 0x30
+#define STM32F4_ADC_SQR3 0x34
+#define STM32F4_ADC_JSQR 0x38
+#define STM32F4_ADC_JDR1 0x3C
+#define STM32F4_ADC_JDR2 0x40
+#define STM32F4_ADC_JDR3 0x44
+#define STM32F4_ADC_JDR4 0x48
+#define STM32F4_ADC_DR 0x4C
+
+/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
+#define STM32F4_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00)
+#define STM32F4_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x04)
+
+/* STM32F4_ADC_SR - bit fields */
+#define STM32F4_STRT BIT(4)
+#define STM32F4_EOC BIT(1)
+
+/* STM32F4_ADC_CR1 - bit fields */
+#define STM32F4_RES_SHIFT 24
+#define STM32F4_RES_MASK GENMASK(25, 24)
+#define STM32F4_SCAN BIT(8)
+#define STM32F4_EOCIE BIT(5)
+
+/* STM32F4_ADC_CR2 - bit fields */
+#define STM32F4_SWSTART BIT(30)
+#define STM32F4_EXTEN_SHIFT 28
+#define STM32F4_EXTEN_MASK GENMASK(29, 28)
+#define STM32F4_EXTSEL_SHIFT 24
+#define STM32F4_EXTSEL_MASK GENMASK(27, 24)
+#define STM32F4_EOCS BIT(10)
+#define STM32F4_DDS BIT(9)
+#define STM32F4_DMA BIT(8)
+#define STM32F4_ADON BIT(0)
+
+/* STM32F4_ADC_CSR - bit fields */
+#define STM32F4_EOC3 BIT(17)
+#define STM32F4_EOC2 BIT(9)
+#define STM32F4_EOC1 BIT(1)
+
+/* STM32F4_ADC_CCR - bit fields */
+#define STM32F4_ADC_ADCPRE_SHIFT 16
+#define STM32F4_ADC_ADCPRE_MASK GENMASK(17, 16)
+
+/* STM32H7 - Registers for each ADC instance */
+#define STM32H7_ADC_ISR 0x00
+#define STM32H7_ADC_IER 0x04
+#define STM32H7_ADC_CR 0x08
+#define STM32H7_ADC_CFGR 0x0C
+#define STM32H7_ADC_SMPR1 0x14
+#define STM32H7_ADC_SMPR2 0x18
+#define STM32H7_ADC_PCSEL 0x1C
+#define STM32H7_ADC_SQR1 0x30
+#define STM32H7_ADC_SQR2 0x34
+#define STM32H7_ADC_SQR3 0x38
+#define STM32H7_ADC_SQR4 0x3C
+#define STM32H7_ADC_DR 0x40
+#define STM32H7_ADC_DIFSEL 0xC0
+#define STM32H7_ADC_CALFACT 0xC4
+#define STM32H7_ADC_CALFACT2 0xC8
+
+/* STM32H7 - common registers for all ADC instances */
+#define STM32H7_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00)
+#define STM32H7_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x08)
+
+/* STM32H7_ADC_ISR - bit fields */
+#define STM32MP1_VREGREADY BIT(12)
+#define STM32H7_EOC BIT(2)
+#define STM32H7_ADRDY BIT(0)
+
+/* STM32H7_ADC_IER - bit fields */
+#define STM32H7_EOCIE STM32H7_EOC
+
+/* STM32H7_ADC_CR - bit fields */
+#define STM32H7_ADCAL BIT(31)
+#define STM32H7_ADCALDIF BIT(30)
+#define STM32H7_DEEPPWD BIT(29)
+#define STM32H7_ADVREGEN BIT(28)
+#define STM32H7_LINCALRDYW6 BIT(27)
+#define STM32H7_LINCALRDYW5 BIT(26)
+#define STM32H7_LINCALRDYW4 BIT(25)
+#define STM32H7_LINCALRDYW3 BIT(24)
+#define STM32H7_LINCALRDYW2 BIT(23)
+#define STM32H7_LINCALRDYW1 BIT(22)
+#define STM32H7_ADCALLIN BIT(16)
+#define STM32H7_BOOST BIT(8)
+#define STM32H7_ADSTP BIT(4)
+#define STM32H7_ADSTART BIT(2)
+#define STM32H7_ADDIS BIT(1)
+#define STM32H7_ADEN BIT(0)
+
+/* STM32H7_ADC_CFGR bit fields */
+#define STM32H7_EXTEN_SHIFT 10
+#define STM32H7_EXTEN_MASK GENMASK(11, 10)
+#define STM32H7_EXTSEL_SHIFT 5
+#define STM32H7_EXTSEL_MASK GENMASK(9, 5)
+#define STM32H7_RES_SHIFT 2
+#define STM32H7_RES_MASK GENMASK(4, 2)
+#define STM32H7_DMNGT_SHIFT 0
+#define STM32H7_DMNGT_MASK GENMASK(1, 0)
+
+enum stm32h7_adc_dmngt {
+ STM32H7_DMNGT_DR_ONLY, /* Regular data in DR only */
+ STM32H7_DMNGT_DMA_ONESHOT, /* DMA one shot mode */
+ STM32H7_DMNGT_DFSDM, /* DFSDM mode */
+ STM32H7_DMNGT_DMA_CIRC, /* DMA circular mode */
+};
+
+/* STM32H7_ADC_CALFACT - bit fields */
+#define STM32H7_CALFACT_D_SHIFT 16
+#define STM32H7_CALFACT_D_MASK GENMASK(26, 16)
+#define STM32H7_CALFACT_S_SHIFT 0
+#define STM32H7_CALFACT_S_MASK GENMASK(10, 0)
+
+/* STM32H7_ADC_CALFACT2 - bit fields */
+#define STM32H7_LINCALFACT_SHIFT 0
+#define STM32H7_LINCALFACT_MASK GENMASK(29, 0)
+
+/* STM32H7_ADC_CSR - bit fields */
+#define STM32H7_EOC_SLV BIT(18)
+#define STM32H7_EOC_MST BIT(2)
+
+/* STM32H7_ADC_CCR - bit fields */
+#define STM32H7_PRESC_SHIFT 18
+#define STM32H7_PRESC_MASK GENMASK(21, 18)
+#define STM32H7_CKMODE_SHIFT 16
+#define STM32H7_CKMODE_MASK GENMASK(17, 16)
+
/**
* struct stm32_adc_common - stm32 ADC driver common data (for all instances)
* @base: control registers base cpu addr
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 6a7dd08b1e0b..663f8a5012d6 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -28,115 +28,6 @@
#include "stm32-adc-core.h"
-/* STM32F4 - Registers for each ADC instance */
-#define STM32F4_ADC_SR 0x00
-#define STM32F4_ADC_CR1 0x04
-#define STM32F4_ADC_CR2 0x08
-#define STM32F4_ADC_SMPR1 0x0C
-#define STM32F4_ADC_SMPR2 0x10
-#define STM32F4_ADC_HTR 0x24
-#define STM32F4_ADC_LTR 0x28
-#define STM32F4_ADC_SQR1 0x2C
-#define STM32F4_ADC_SQR2 0x30
-#define STM32F4_ADC_SQR3 0x34
-#define STM32F4_ADC_JSQR 0x38
-#define STM32F4_ADC_JDR1 0x3C
-#define STM32F4_ADC_JDR2 0x40
-#define STM32F4_ADC_JDR3 0x44
-#define STM32F4_ADC_JDR4 0x48
-#define STM32F4_ADC_DR 0x4C
-
-/* STM32F4_ADC_SR - bit fields */
-#define STM32F4_STRT BIT(4)
-#define STM32F4_EOC BIT(1)
-
-/* STM32F4_ADC_CR1 - bit fields */
-#define STM32F4_RES_SHIFT 24
-#define STM32F4_RES_MASK GENMASK(25, 24)
-#define STM32F4_SCAN BIT(8)
-#define STM32F4_EOCIE BIT(5)
-
-/* STM32F4_ADC_CR2 - bit fields */
-#define STM32F4_SWSTART BIT(30)
-#define STM32F4_EXTEN_SHIFT 28
-#define STM32F4_EXTEN_MASK GENMASK(29, 28)
-#define STM32F4_EXTSEL_SHIFT 24
-#define STM32F4_EXTSEL_MASK GENMASK(27, 24)
-#define STM32F4_EOCS BIT(10)
-#define STM32F4_DDS BIT(9)
-#define STM32F4_DMA BIT(8)
-#define STM32F4_ADON BIT(0)
-
-/* STM32H7 - Registers for each ADC instance */
-#define STM32H7_ADC_ISR 0x00
-#define STM32H7_ADC_IER 0x04
-#define STM32H7_ADC_CR 0x08
-#define STM32H7_ADC_CFGR 0x0C
-#define STM32H7_ADC_SMPR1 0x14
-#define STM32H7_ADC_SMPR2 0x18
-#define STM32H7_ADC_PCSEL 0x1C
-#define STM32H7_ADC_SQR1 0x30
-#define STM32H7_ADC_SQR2 0x34
-#define STM32H7_ADC_SQR3 0x38
-#define STM32H7_ADC_SQR4 0x3C
-#define STM32H7_ADC_DR 0x40
-#define STM32H7_ADC_DIFSEL 0xC0
-#define STM32H7_ADC_CALFACT 0xC4
-#define STM32H7_ADC_CALFACT2 0xC8
-
-/* STM32H7_ADC_ISR - bit fields */
-#define STM32MP1_VREGREADY BIT(12)
-#define STM32H7_EOC BIT(2)
-#define STM32H7_ADRDY BIT(0)
-
-/* STM32H7_ADC_IER - bit fields */
-#define STM32H7_EOCIE STM32H7_EOC
-
-/* STM32H7_ADC_CR - bit fields */
-#define STM32H7_ADCAL BIT(31)
-#define STM32H7_ADCALDIF BIT(30)
-#define STM32H7_DEEPPWD BIT(29)
-#define STM32H7_ADVREGEN BIT(28)
-#define STM32H7_LINCALRDYW6 BIT(27)
-#define STM32H7_LINCALRDYW5 BIT(26)
-#define STM32H7_LINCALRDYW4 BIT(25)
-#define STM32H7_LINCALRDYW3 BIT(24)
-#define STM32H7_LINCALRDYW2 BIT(23)
-#define STM32H7_LINCALRDYW1 BIT(22)
-#define STM32H7_ADCALLIN BIT(16)
-#define STM32H7_BOOST BIT(8)
-#define STM32H7_ADSTP BIT(4)
-#define STM32H7_ADSTART BIT(2)
-#define STM32H7_ADDIS BIT(1)
-#define STM32H7_ADEN BIT(0)
-
-/* STM32H7_ADC_CFGR bit fields */
-#define STM32H7_EXTEN_SHIFT 10
-#define STM32H7_EXTEN_MASK GENMASK(11, 10)
-#define STM32H7_EXTSEL_SHIFT 5
-#define STM32H7_EXTSEL_MASK GENMASK(9, 5)
-#define STM32H7_RES_SHIFT 2
-#define STM32H7_RES_MASK GENMASK(4, 2)
-#define STM32H7_DMNGT_SHIFT 0
-#define STM32H7_DMNGT_MASK GENMASK(1, 0)
-
-enum stm32h7_adc_dmngt {
- STM32H7_DMNGT_DR_ONLY, /* Regular data in DR only */
- STM32H7_DMNGT_DMA_ONESHOT, /* DMA one shot mode */
- STM32H7_DMNGT_DFSDM, /* DFSDM mode */
- STM32H7_DMNGT_DMA_CIRC, /* DMA circular mode */
-};
-
-/* STM32H7_ADC_CALFACT - bit fields */
-#define STM32H7_CALFACT_D_SHIFT 16
-#define STM32H7_CALFACT_D_MASK GENMASK(26, 16)
-#define STM32H7_CALFACT_S_SHIFT 0
-#define STM32H7_CALFACT_S_MASK GENMASK(10, 0)
-
-/* STM32H7_ADC_CALFACT2 - bit fields */
-#define STM32H7_LINCALFACT_SHIFT 0
-#define STM32H7_LINCALFACT_MASK GENMASK(29, 0)
-
/* Number of linear calibration shadow registers / LINCALRDYW control bits */
#define STM32H7_LINCALFACT_NUM 6
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
index 9ac8356d9a95..4998a89d083d 100644
--- a/drivers/iio/imu/adis_buffer.c
+++ b/drivers/iio/imu/adis_buffer.c
@@ -35,8 +35,11 @@ static int adis_update_scan_mode_burst(struct iio_dev *indio_dev,
return -ENOMEM;
adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
- if (!adis->buffer)
+ if (!adis->buffer) {
+ kfree(adis->xfer);
+ adis->xfer = NULL;
return -ENOMEM;
+ }
tx = adis->buffer + burst_length;
tx[0] = ADIS_READ_REG(adis->burst->reg_cmd);
@@ -78,8 +81,11 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
return -ENOMEM;
adis->buffer = kcalloc(indio_dev->scan_bytes, 2, GFP_KERNEL);
- if (!adis->buffer)
+ if (!adis->buffer) {
+ kfree(adis->xfer);
+ adis->xfer = NULL;
return -ENOMEM;
+ }
rx = adis->buffer;
tx = rx + scan_count;
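
Both hunks above fix the same class of leak: when the second allocation fails, the earlier adis->xfer allocation must be released and NULLed before returning. A minimal userspace sketch of that unwind pattern (struct and names here are illustrative, not the driver's):

#include <stdlib.h>

struct ctx { void *xfer; void *buffer; };

/* Allocate two buffers; if the second fails, unwind the first. */
static int ctx_alloc(struct ctx *c, size_t xfer_len, size_t buf_len)
{
	c->xfer = malloc(xfer_len);
	if (!c->xfer)
		return -1;

	c->buffer = calloc(1, buf_len);
	if (!c->buffer) {
		free(c->xfer);   /* release the earlier allocation */
		c->xfer = NULL;  /* no dangling pointer on a later retry */
		return -1;
	}
	return 0;
}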
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index 80e42c7dbcbe..0fe6999b8257 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -99,7 +99,9 @@ struct st_lsm6dsx_fs {
#define ST_LSM6DSX_FS_LIST_SIZE 4
struct st_lsm6dsx_fs_table_entry {
struct st_lsm6dsx_reg reg;
+
struct st_lsm6dsx_fs fs_avl[ST_LSM6DSX_FS_LIST_SIZE];
+ int fs_len;
};
/**
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 2d3495560136..fd5ebe1e1594 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -145,6 +145,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
.fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
.fs_avl[3] = { IIO_G_TO_M_S_2(732), 0x1 },
+ .fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -154,6 +155,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[0] = { IIO_DEGREE_TO_RAD(245), 0x0 },
.fs_avl[1] = { IIO_DEGREE_TO_RAD(500), 0x1 },
.fs_avl[2] = { IIO_DEGREE_TO_RAD(2000), 0x3 },
+ .fs_len = 3,
},
},
},
@@ -215,6 +217,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
.fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
.fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ .fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -225,6 +228,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
.fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
.fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ .fs_len = 4,
},
},
.decimator = {
@@ -327,6 +331,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
.fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
.fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ .fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -337,6 +342,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
.fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
.fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ .fs_len = 4,
},
},
.decimator = {
@@ -448,6 +454,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
.fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
.fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ .fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -458,6 +465,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
.fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
.fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ .fs_len = 4,
},
},
.decimator = {
@@ -563,6 +571,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
.fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
.fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ .fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -573,6 +582,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
.fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
.fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ .fs_len = 4,
},
},
.batch = {
@@ -693,6 +703,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
.fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
.fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ .fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -703,6 +714,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
.fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
.fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ .fs_len = 4,
},
},
.batch = {
@@ -800,6 +812,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
.fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
.fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ .fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -810,6 +823,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
.fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
.fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ .fs_len = 4,
},
},
.batch = {
@@ -933,11 +947,12 @@ static int st_lsm6dsx_set_full_scale(struct st_lsm6dsx_sensor *sensor,
int i, err;
fs_table = &sensor->hw->settings->fs_table[sensor->id];
- for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++)
+ for (i = 0; i < fs_table->fs_len; i++) {
if (fs_table->fs_avl[i].gain == gain)
break;
+ }
- if (i == ST_LSM6DSX_FS_LIST_SIZE)
+ if (i == fs_table->fs_len)
return -EINVAL;
data = ST_LSM6DSX_SHIFT_VAL(fs_table->fs_avl[i].val,
@@ -1196,18 +1211,13 @@ static ssize_t st_lsm6dsx_sysfs_scale_avail(struct device *dev,
{
struct st_lsm6dsx_sensor *sensor = iio_priv(dev_get_drvdata(dev));
const struct st_lsm6dsx_fs_table_entry *fs_table;
- enum st_lsm6dsx_sensor_id id = sensor->id;
struct st_lsm6dsx_hw *hw = sensor->hw;
int i, len = 0;
- fs_table = &hw->settings->fs_table[id];
- for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++) {
- if (!fs_table->fs_avl[i].gain)
- break;
-
+ fs_table = &hw->settings->fs_table[sensor->id];
+ for (i = 0; i < fs_table->fs_len; i++)
len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
fs_table->fs_avl[i].gain);
- }
buf[len - 1] = '\n';
return len;
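
The theme of this patch: every fs_table now carries an explicit fs_len, and each consumer iterates i < fs_len rather than the fixed ST_LSM6DSX_FS_LIST_SIZE, so a gyro table with only three entries never exposes a zeroed fourth gain. A hedged sketch of the lookup pattern with simplified stand-in types:

#include <stddef.h>

#define FS_LIST_SIZE 4

struct fs_entry { unsigned int gain; unsigned char val; };
struct fs_table { struct fs_entry fs_avl[FS_LIST_SIZE]; int fs_len; };

/* Return the register value for a gain, or -1 if unsupported. */
static int fs_lookup(const struct fs_table *t, unsigned int gain)
{
	int i;

	for (i = 0; i < t->fs_len; i++)	/* bounded by real entry count */
		if (t->fs_avl[i].gain == gain)
			return t->fs_avl[i].val;
	return -1;
}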
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
index 66fbcd94642d..ea472cf6db7b 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
@@ -61,6 +61,7 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = {
.gain = 1500,
.val = 0x0,
}, /* 1500 uG/LSB */
+ .fs_len = 1,
},
.temp_comp = {
.addr = 0x60,
@@ -92,9 +93,11 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = {
static void st_lsm6dsx_shub_wait_complete(struct st_lsm6dsx_hw *hw)
{
struct st_lsm6dsx_sensor *sensor;
+ u16 odr;
sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
- msleep((2000U / sensor->odr) + 1);
+ odr = (hw->enable_mask & BIT(ST_LSM6DSX_ID_ACC)) ? sensor->odr : 13;
+ msleep((2000U / odr) + 1);
}
/**
@@ -555,13 +558,9 @@ static ssize_t st_lsm6dsx_shub_scale_avail(struct device *dev,
int i, len = 0;
settings = sensor->ext_info.settings;
- for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++) {
- u16 val = settings->fs_table.fs_avl[i].gain;
-
- if (val > 0)
- len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
- val);
- }
+ for (i = 0; i < settings->fs_table.fs_len; i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
+ settings->fs_table.fs_avl[i].gain);
buf[len - 1] = '\n';
return len;
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 08d7e1ef2186..4a1a883dc061 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -314,6 +314,7 @@ config MAX44009
config NOA1305
tristate "ON Semiconductor NOA1305 ambient light sensor"
depends on I2C
+ select REGMAP_I2C
help
Say Y here if you want to build support for the ON Semiconductor
NOA1305 ambient light sensor.
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index e666879007d2..92004a2563ea 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -686,6 +686,7 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
struct iio_dev *iio = _iio;
struct opt3001 *opt = iio_priv(iio);
int ret;
+ bool wake_result_ready_queue = false;
if (!opt->ok_to_ignore_lock)
mutex_lock(&opt->lock);
@@ -720,13 +721,16 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
}
opt->result = ret;
opt->result_ready = true;
- wake_up(&opt->result_ready_queue);
+ wake_result_ready_queue = true;
}
out:
if (!opt->ok_to_ignore_lock)
mutex_unlock(&opt->lock);
+ if (wake_result_ready_queue)
+ wake_up(&opt->result_ready_queue);
+
return IRQ_HANDLED;
}
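
The opt3001 change defers wake_up() until after the mutex is dropped, so a woken waiter does not immediately block on the lock the IRQ handler still holds. A runnable pthread analogue of "record under the lock, signal after unlocking" (all names illustrative):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t result_ready = PTHREAD_COND_INITIALIZER;
static bool ready;
static int result;

static void publish_result(int value, bool ok)
{
	bool wake = false;

	pthread_mutex_lock(&lock);
	if (ok) {
		result = value;
		ready = true;
		wake = true;	/* remember; don't signal under the lock */
	}
	pthread_mutex_unlock(&lock);

	if (wake)		/* waiter wakes with the lock already free */
		pthread_cond_signal(&result_ready);
}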
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index 51421ac32517..16dacea9eadf 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -398,19 +398,23 @@ static int vcnl4000_probe(struct i2c_client *client,
static const struct of_device_id vcnl_4000_of_match[] = {
{
.compatible = "vishay,vcnl4000",
- .data = "VCNL4000",
+ .data = (void *)VCNL4000,
},
{
.compatible = "vishay,vcnl4010",
- .data = "VCNL4010",
+ .data = (void *)VCNL4010,
},
{
- .compatible = "vishay,vcnl4010",
- .data = "VCNL4020",
+ .compatible = "vishay,vcnl4020",
+ .data = (void *)VCNL4010,
+ },
+ {
+ .compatible = "vishay,vcnl4040",
+ .data = (void *)VCNL4040,
},
{
.compatible = "vishay,vcnl4200",
- .data = "VCNL4200",
+ .data = (void *)VCNL4200,
},
{},
};
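
Instead of storing a model-name string, each of_device_id entry now carries the enum id cast to void *, which a probe path can recover with of_device_get_match_data(). A kernel-style sketch of the retrieval side (function name is illustrative; it builds only in-tree):

/* Kernel-style sketch; relies on the real of_device_get_match_data(). */
#include <linux/of_device.h>

static unsigned long example_get_model(struct device *dev)
{
	/* .data held an enum cast to void *; cast it back the same way */
	return (unsigned long)of_device_get_match_data(dev);
}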
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index da10e6ccb43c..5920c0085d35 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -4399,6 +4399,7 @@ error2:
error1:
port_modify.set_port_cap_mask = 0;
port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
+ kfree(port);
while (--i) {
if (!rdma_cap_ib_cm(ib_device, i))
continue;
@@ -4407,6 +4408,7 @@ error1:
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
ib_unregister_mad_agent(port->mad_agent);
cm_remove_port_fs(port);
+ kfree(port);
}
free:
kfree(cm_dev);
@@ -4460,6 +4462,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
spin_unlock_irq(&cm.state_lock);
ib_unregister_mad_agent(cur_mad_agent);
cm_remove_port_fs(port);
+ kfree(port);
}
kfree(cm_dev);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 0e3cf3461999..d78f67623f24 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2396,9 +2396,10 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
conn_id->cm_id.iw = NULL;
cma_exch(conn_id, RDMA_CM_DESTROYING);
mutex_unlock(&conn_id->handler_mutex);
+ mutex_unlock(&listen_id->handler_mutex);
cma_deref_id(conn_id);
rdma_destroy_id(&conn_id->id);
- goto out;
+ return ret;
}
mutex_unlock(&conn_id->handler_mutex);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 99c4a55545cf..2dd2cfe9b561 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1987,8 +1987,6 @@ static int iw_query_port(struct ib_device *device,
if (!netdev)
return -ENODEV;
- dev_put(netdev);
-
port_attr->max_mtu = IB_MTU_4096;
port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
@@ -1996,19 +1994,22 @@ static int iw_query_port(struct ib_device *device,
port_attr->state = IB_PORT_DOWN;
port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
} else {
- inetdev = in_dev_get(netdev);
+ rcu_read_lock();
+ inetdev = __in_dev_get_rcu(netdev);
if (inetdev && inetdev->ifa_list) {
port_attr->state = IB_PORT_ACTIVE;
port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
- in_dev_put(inetdev);
} else {
port_attr->state = IB_PORT_INIT;
port_attr->phys_state =
IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING;
}
+
+ rcu_read_unlock();
}
+ dev_put(netdev);
err = device->ops.query_port(device, port_num, port_attr);
if (err)
return err;
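
The iw_query_port() fix keeps the netdev reference held until after the in_device is inspected, and swaps in_dev_get()/in_dev_put() for __in_dev_get_rcu() inside an RCU read-side section. A hedged kernel-style sketch of the shape (helper name is an assumption):

/* Kernel-style sketch; caller holds a reference on netdev. */
#include <linux/inetdevice.h>

static bool port_has_ipv4_addr(struct net_device *netdev)
{
	struct in_device *inetdev;
	bool up;

	rcu_read_lock();
	inetdev = __in_dev_get_rcu(netdev);	/* no refcount taken */
	up = inetdev && inetdev->ifa_list;	/* valid only in-section */
	rcu_read_unlock();

	return up;
}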
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 7a7474000100..65b36548bc17 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -1230,7 +1230,7 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
- goto err;
+ goto err_get;
}
nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
@@ -1787,10 +1787,6 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
- ret = rdma_counter_unbind_qpn(device, port, qpn, cntn);
- if (ret)
- goto err_unbind;
-
if (fill_nldev_handle(msg, device) ||
nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
@@ -1799,13 +1795,15 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
goto err_fill;
}
+ ret = rdma_counter_unbind_qpn(device, port, qpn, cntn);
+ if (ret)
+ goto err_fill;
+
nlmsg_end(msg, nlh);
ib_device_put(device);
return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
err_fill:
- rdma_counter_bind_qpn(device, port, qpn, cntn);
-err_unbind:
nlmsg_free(msg);
err:
ib_device_put(device);
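
The nldev reorder makes rdma_counter_unbind_qpn() the last fallible step: the netlink reply is filled first, so a fill failure no longer has to re-bind the counter to undo a premature unbind. The general rule is to do the hard-to-undo operation after the easy-to-undo ones; a userspace sketch with stand-in operations:

/* Stand-ins for the real operations (illustrative only). */
static int fill_reply(void) { return 0; }	/* reversible: drop buffer */
static int unbind(void)     { return 0; }	/* irreversible without re-bind */

static int del_counter(void)
{
	int err;

	err = fill_reply();	/* fallible but reversible step first */
	if (err)
		return err;	/* nothing to roll back */

	err = unbind();		/* irreversible step last */
	if (err)
		return err;	/* reply buffer is simply discarded */

	return 0;		/* send the prepared reply */
}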
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 1ab423b19f77..6eb6d2717ca5 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -426,7 +426,7 @@ int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
int ret;
rdma_for_each_port (dev, i) {
- is_ib = rdma_protocol_ib(dev, i++);
+ is_ib = rdma_protocol_ib(dev, i);
if (is_ib)
break;
}
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index f67a30fda1ed..163ff7ba92b7 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -451,8 +451,10 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
* that the hardware will not attempt to access the MR any more.
*/
if (!umem_odp->is_implicit_odp) {
+ mutex_lock(&umem_odp->umem_mutex);
ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
ib_umem_end(umem_odp));
+ mutex_unlock(&umem_odp->umem_mutex);
kvfree(umem_odp->dma_list);
kvfree(umem_odp->page_list);
}
@@ -719,6 +721,8 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
u64 addr;
struct ib_device *dev = umem_odp->umem.ibdev;
+ lockdep_assert_held(&umem_odp->umem_mutex);
+
virt = max_t(u64, virt, ib_umem_start(umem_odp));
bound = min_t(u64, bound, ib_umem_end(umem_odp));
/* Note that during the run of this function, the
@@ -726,7 +730,6 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
* faults from completion. We might be racing with other
* invalidations, so we must make sure we free each page only
* once. */
- mutex_lock(&umem_odp->umem_mutex);
for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
if (umem_odp->page_list[idx]) {
@@ -757,7 +760,6 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
umem_odp->npages--;
}
}
- mutex_unlock(&umem_odp->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
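
The locking responsibility moves to the callers here: ib_umem_odp_unmap_dma_pages() no longer takes umem_mutex itself and instead documents the precondition with lockdep_assert_held(). A minimal kernel-style sketch of the caller-locks/callee-asserts convention (names illustrative):

#include <linux/mutex.h>
#include <linux/lockdep.h>

static DEFINE_MUTEX(obj_mutex);

static void obj_teardown_locked(void)
{
	lockdep_assert_held(&obj_mutex);	/* splat if caller forgot */
	/* ... walk and free pages ... */
}

static void obj_release(void)
{
	mutex_lock(&obj_mutex);			/* caller owns the lock */
	obj_teardown_locked();
	mutex_unlock(&obj_mutex);
}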
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index a8b9548bd1a2..599340c1f0b8 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -242,10 +242,13 @@ static void set_ep_sin6_addrs(struct c4iw_ep *ep,
}
}
-static int dump_qp(struct c4iw_qp *qp, struct c4iw_debugfs_data *qpd)
+static int dump_qp(unsigned long id, struct c4iw_qp *qp,
+ struct c4iw_debugfs_data *qpd)
{
int space;
int cc;
+ if (id != qp->wq.sq.qid)
+ return 0;
space = qpd->bufsize - qpd->pos - 1;
if (space == 0)
@@ -350,7 +353,7 @@ static int qp_open(struct inode *inode, struct file *file)
xa_lock_irq(&qpd->devp->qps);
xa_for_each(&qpd->devp->qps, index, qp)
- dump_qp(qp, qpd);
+ dump_qp(index, qp, qpd);
xa_unlock_irq(&qpd->devp->qps);
qpd->buf[qpd->pos++] = 0;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index aa772ee0706f..35c284af574d 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -275,13 +275,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
{
int err;
- struct fw_ri_tpte tpt;
+ struct fw_ri_tpte *tpt;
u32 stag_idx;
static atomic_t key;
if (c4iw_fatal_error(rdev))
return -EIO;
+ tpt = kmalloc(sizeof(*tpt), GFP_KERNEL);
+ if (!tpt)
+ return -ENOMEM;
+
stag_state = stag_state > 0;
stag_idx = (*stag) >> 8;
@@ -291,6 +295,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
mutex_lock(&rdev->stats.lock);
rdev->stats.stag.fail++;
mutex_unlock(&rdev->stats.lock);
+ kfree(tpt);
return -ENOMEM;
}
mutex_lock(&rdev->stats.lock);
@@ -305,28 +310,28 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
/* write TPT entry */
if (reset_tpt_entry)
- memset(&tpt, 0, sizeof(tpt));
+ memset(tpt, 0, sizeof(*tpt));
else {
- tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
+ tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
FW_RI_TPTE_STAGSTATE_V(stag_state) |
FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
- tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
+ tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
(bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
FW_RI_VA_BASED_TO))|
FW_RI_TPTE_PS_V(page_size));
- tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
+ tpt->nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
- tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
- tpt.va_hi = cpu_to_be32((u32)(to >> 32));
- tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
- tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
- tpt.len_hi = cpu_to_be32((u32)(len >> 32));
+ tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
+ tpt->va_hi = cpu_to_be32((u32)(to >> 32));
+ tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
+ tpt->dca_mwbcnt_pstag = cpu_to_be32(0);
+ tpt->len_hi = cpu_to_be32((u32)(len >> 32));
}
err = write_adapter_mem(rdev, stag_idx +
(rdev->lldi.vr->stag.start >> 5),
- sizeof(tpt), &tpt, skb, wr_waitp);
+ sizeof(*tpt), tpt, skb, wr_waitp);
if (reset_tpt_entry) {
c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
@@ -334,6 +339,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
rdev->stats.stag.cur -= 32;
mutex_unlock(&rdev->stats.lock);
}
+ kfree(tpt);
return err;
}
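
struct fw_ri_tpte moves off the kernel stack onto the heap, and every return path, including the pre-existing error exits, now frees it. A runnable userspace sketch of the same stack-to-heap conversion (types and the commented device write are stand-ins):

#include <stdlib.h>
#include <string.h>

struct big_entry { unsigned char raw[64]; };	/* stand-in for fw_ri_tpte */

static int write_entry(int reset)
{
	struct big_entry *e = malloc(sizeof(*e));	/* was a stack variable */
	int err = 0;

	if (!e)
		return -1;

	if (reset)
		memset(e, 0, sizeof(*e));
	else
		e->raw[0] = 0x1;	/* ... fill the fields ... */

	/* err = write_adapter_mem(...); hypothetical device write */
	free(e);			/* freed on every path */
	return err;
}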
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index eb9368be28c1..bbcac539777a 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2737,15 +2737,11 @@ int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6)
srq->flags = T4_SRQ_LIMIT_SUPPORT;
- ret = xa_insert_irq(&rhp->qps, srq->wq.qid, srq, GFP_KERNEL);
- if (ret)
- goto err_free_queue;
-
if (udata) {
srq_key_mm = kmalloc(sizeof(*srq_key_mm), GFP_KERNEL);
if (!srq_key_mm) {
ret = -ENOMEM;
- goto err_remove_handle;
+ goto err_free_queue;
}
srq_db_key_mm = kmalloc(sizeof(*srq_db_key_mm), GFP_KERNEL);
if (!srq_db_key_mm) {
@@ -2789,8 +2785,6 @@ err_free_srq_db_key_mm:
kfree(srq_db_key_mm);
err_free_srq_key_mm:
kfree(srq_key_mm);
-err_remove_handle:
- xa_erase_irq(&rhp->qps, srq->wq.qid);
err_free_queue:
free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
srq->wr_waitp);
@@ -2813,8 +2807,6 @@ void c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
rhp = srq->rhp;
pr_debug("%s id %d\n", __func__, srq->wq.qid);
-
- xa_erase_irq(&rhp->qps, srq->wq.qid);
ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
ibucontext);
free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 2395fd4233a7..2ed7bfd5feea 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1526,8 +1526,11 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
}
ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params);
- if (ret < 0)
+ if (ret < 0) {
+ kfree(tmp_sdma_rht);
goto bail;
+ }
+
dd->sdma_rht = tmp_sdma_rht;
dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 8056930bbe2c..cd9ee1664a69 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -2773,6 +2773,10 @@ int i40iw_register_rdma_device(struct i40iw_device *iwdev)
return -ENOMEM;
iwibdev = iwdev->iwibdev;
rdma_set_device_sysfs_group(&iwibdev->ibdev, &i40iw_attr_group);
+ ret = ib_device_set_netdev(&iwibdev->ibdev, iwdev->netdev, 1);
+ if (ret)
+ goto error;
+
ret = ib_register_device(&iwibdev->ibdev, "i40iw%d");
if (ret)
goto error;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 59022b744144..d609f4659afb 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -1298,29 +1298,6 @@ static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
return 0;
}
-static void devx_free_indirect_mkey(struct rcu_head *rcu)
-{
- kfree(container_of(rcu, struct devx_obj, devx_mr.rcu));
-}
-
-/* This function to delete from the radix tree needs to be called before
- * destroying the underlying mkey. Otherwise a race might occur in case that
- * other thread will get the same mkey before this one will be deleted,
- * in that case it will fail via inserting to the tree its own data.
- *
- * Note:
- * An error in the destroy is not expected unless there is some other indirect
- * mkey which points to this one. In a kernel cleanup flow it will be just
- * destroyed in the iterative destruction call. In a user flow, in case
- * the application didn't close in the expected order it's its own problem,
- * the mkey won't be part of the tree, in both cases the kernel is safe.
- */
-static void devx_cleanup_mkey(struct devx_obj *obj)
-{
- xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
- mlx5_base_mkey(obj->devx_mr.mmkey.key));
-}
-
static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
struct devx_event_subscription *sub)
{
@@ -1362,8 +1339,16 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
int ret;
dev = mlx5_udata_to_mdev(&attrs->driver_udata);
- if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
- devx_cleanup_mkey(obj);
+ if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
+ /*
+ * pagefault_single_data_segment() issues commands against the
+ * mmkey; we must wait for it to stop before freeing the mkey,
+ * as another allocation could get the same mkey #.
+ */
+ xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
+ mlx5_base_mkey(obj->devx_mr.mmkey.key));
+ synchronize_srcu(&dev->mr_srcu);
+ }
if (obj->flags & DEVX_OBJ_FLAGS_DCT)
ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
@@ -1382,12 +1367,6 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
devx_cleanup_subscription(dev, sub_entry);
mutex_unlock(&devx_event_table->event_xa_lock);
- if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
- call_srcu(&dev->mr_srcu, &obj->devx_mr.rcu,
- devx_free_indirect_mkey);
- return ret;
- }
-
kfree(obj);
return ret;
}
@@ -1491,26 +1470,21 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
&obj_id);
WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
- if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
- err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
- if (err)
- goto obj_destroy;
- }
-
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
if (err)
- goto err_copy;
+ goto obj_destroy;
if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
-
obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);
+ if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
+ err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
+ if (err)
+ goto obj_destroy;
+ }
return 0;
-err_copy:
- if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
- devx_cleanup_mkey(obj);
obj_destroy:
if (obj->flags & DEVX_OBJ_FLAGS_DCT)
mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 2ceaef3ea3fb..1a98ee2e01c4 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -606,7 +606,7 @@ struct mlx5_ib_mr {
struct mlx5_ib_dev *dev;
u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
struct mlx5_core_sig_ctx *sig;
- int live;
+ unsigned int live;
void *descs_alloc;
int access_flags; /* Needed for rereg MR */
@@ -639,7 +639,6 @@ struct mlx5_ib_mw {
struct mlx5_ib_devx_mr {
struct mlx5_core_mkey mmkey;
int ndescs;
- struct rcu_head rcu;
};
struct mlx5_ib_umr_context {
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 1eff031ef048..630599311586 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -84,32 +84,6 @@ static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}
-static void update_odp_mr(struct mlx5_ib_mr *mr)
-{
- if (is_odp_mr(mr)) {
- /*
- * This barrier prevents the compiler from moving the
- * setting of umem->odp_data->private to point to our
- * MR, before reg_umr finished, to ensure that the MR
- * initialization have finished before starting to
- * handle invalidations.
- */
- smp_wmb();
- to_ib_umem_odp(mr->umem)->private = mr;
- /*
- * Make sure we will see the new
- * umem->odp_data->private value in the invalidation
- * routines, before we can get page faults on the
- * MR. Page faults can happen once we put the MR in
- * the tree, below this line. Without the barrier,
- * there can be a fault handling and an invalidation
- * before umem->odp_data->private == mr is visible to
- * the invalidation handler.
- */
- smp_wmb();
- }
-}
-
static void reg_mr_callback(int status, struct mlx5_async_work *context)
{
struct mlx5_ib_mr *mr =
@@ -1346,8 +1320,6 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->umem = umem;
set_mr_fields(dev, mr, npages, length, access_flags);
- update_odp_mr(mr);
-
if (use_umr) {
int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
@@ -1363,10 +1335,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
}
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- mr->live = 1;
+ if (is_odp_mr(mr)) {
+ to_ib_umem_odp(mr->umem)->private = mr;
atomic_set(&mr->num_pending_prefetch, 0);
}
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ smp_store_release(&mr->live, 1);
return &mr->ibmr;
error:
@@ -1441,6 +1415,9 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
if (!mr->umem)
return -EINVAL;
+ if (is_odp_mr(mr))
+ return -EOPNOTSUPP;
+
if (flags & IB_MR_REREG_TRANS) {
addr = virt_addr;
len = length;
@@ -1486,8 +1463,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
}
mr->allocated_from_cache = 0;
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
- mr->live = 1;
} else {
/*
* Send a UMR WQE
@@ -1516,7 +1491,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
set_mr_fields(dev, mr, npages, len, access_flags);
- update_odp_mr(mr);
return 0;
err:
@@ -1607,15 +1581,16 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
/* Prevent new page faults and
* prefetch requests from succeeding
*/
- mr->live = 0;
+ WRITE_ONCE(mr->live, 0);
+
+ /* Wait for all running page-fault handlers to finish. */
+ synchronize_srcu(&dev->mr_srcu);
/* dequeue pending prefetch requests for the mr */
if (atomic_read(&mr->num_pending_prefetch))
flush_workqueue(system_unbound_wq);
WARN_ON(atomic_read(&mr->num_pending_prefetch));
- /* Wait for all running page-fault handlers to finish. */
- synchronize_srcu(&dev->mr_srcu);
/* Destroy all page mappings */
if (!umem_odp->is_implicit_odp)
mlx5_ib_invalidate_range(umem_odp,
@@ -1987,14 +1962,25 @@ free:
int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
+ struct mlx5_ib_dev *dev = to_mdev(mw->device);
struct mlx5_ib_mw *mmw = to_mmw(mw);
int err;
- err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
- &mmw->mmkey);
- if (!err)
- kfree(mmw);
- return err;
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+ xa_erase(&dev->mdev->priv.mkey_table,
+ mlx5_base_mkey(mmw->mmkey.key));
+ /*
+ * pagefault_single_data_segment() may be accessing mmw under
+ * SRCU if the user bound an ODP MR to this MW.
+ */
+ synchronize_srcu(&dev->mr_srcu);
+ }
+
+ err = mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
+ if (err)
+ return err;
+ kfree(mmw);
+ return 0;
}
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
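
Throughout mr.c, mr->live becomes an unsigned int published with smp_store_release() and read with smp_load_acquire() (plus WRITE_ONCE() on teardown), so any reader that observes live == 1 also observes the fully initialized MR. A runnable C11-atomics analogue of that publish/observe pairing (struct is a stand-in):

#include <stdatomic.h>

struct mr {
	int payload;		/* stands in for the initialized MR state */
	atomic_uint live;
};

static void mr_publish(struct mr *m)
{
	m->payload = 42;	/* initialize everything first */
	atomic_store_explicit(&m->live, 1, memory_order_release);
}

static int mr_use(struct mr *m)
{
	if (!atomic_load_explicit(&m->live, memory_order_acquire))
		return -1;	/* dead or not yet live */
	return m->payload;	/* initialized if live == 1 was seen */
}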
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 2e9b43061797..3f9478d19376 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -178,6 +178,29 @@ void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
return;
}
+ /*
+ * The locking here is pretty subtle. Ideally the implicit children
+ * list would be protected by the umem_mutex, however that is not
+ * possible. Instead this uses a weaker update-then-lock pattern:
+ *
+ * srcu_read_lock()
+ * <change children list>
+ * mutex_lock(umem_mutex)
+ * mlx5_ib_update_xlt()
+ * mutex_unlock(umem_mutex)
+ * destroy lkey
+ *
+ * i.e. any change to the children list must be followed by the locked
+ * update_xlt before destroying.
+ *
+ * The umem_mutex provides the acquire/release semantic needed to make
+ * the children list visible to a racing thread. While SRCU is not
+ * technically required, using it keeps the SRCU locking around
+ * the children list consistent.
+ */
+ lockdep_assert_held(&to_ib_umem_odp(mr->umem)->umem_mutex);
+ lockdep_assert_held(&mr->dev->mr_srcu);
+
odp = odp_lookup(offset * MLX5_IMR_MTT_SIZE,
nentries * MLX5_IMR_MTT_SIZE, mr);
@@ -202,15 +225,22 @@ static void mr_leaf_free_action(struct work_struct *work)
struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work);
int idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;
+ struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
+ int srcu_key;
mr->parent = NULL;
synchronize_srcu(&mr->dev->mr_srcu);
- ib_umem_odp_release(odp);
- if (imr->live)
+ if (smp_load_acquire(&imr->live)) {
+ srcu_key = srcu_read_lock(&mr->dev->mr_srcu);
+ mutex_lock(&odp_imr->umem_mutex);
mlx5_ib_update_xlt(imr, idx, 1, 0,
MLX5_IB_UPD_XLT_INDIRECT |
MLX5_IB_UPD_XLT_ATOMIC);
+ mutex_unlock(&odp_imr->umem_mutex);
+ srcu_read_unlock(&mr->dev->mr_srcu, srcu_key);
+ }
+ ib_umem_odp_release(odp);
mlx5_mr_cache_free(mr->dev, mr);
if (atomic_dec_and_test(&imr->num_leaf_free))
@@ -278,7 +308,6 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
idx - blk_start_idx + 1, 0,
MLX5_IB_UPD_XLT_ZAP |
MLX5_IB_UPD_XLT_ATOMIC);
- mutex_unlock(&umem_odp->umem_mutex);
/*
* We are now sure that the device will not access the
* memory. We can safely unmap it, and mark it as dirty if
@@ -289,10 +318,12 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
if (unlikely(!umem_odp->npages && mr->parent &&
!umem_odp->dying)) {
- WRITE_ONCE(umem_odp->dying, 1);
+ WRITE_ONCE(mr->live, 0);
+ umem_odp->dying = 1;
atomic_inc(&mr->parent->num_leaf_free);
schedule_work(&umem_odp->work);
}
+ mutex_unlock(&umem_odp->umem_mutex);
}
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
@@ -429,8 +460,6 @@ static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
- mr->live = 1;
-
mlx5_ib_dbg(dev, "key %x dev %p mr %p\n",
mr->mmkey.key, dev->mdev, mr);
@@ -484,6 +513,8 @@ next_mr:
mtt->parent = mr;
INIT_WORK(&odp->work, mr_leaf_free_action);
+ smp_store_release(&mtt->live, 1);
+
if (!nentries)
start_idx = addr >> MLX5_IMR_MTT_SHIFT;
nentries++;
@@ -536,6 +567,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
init_waitqueue_head(&imr->q_leaf_free);
atomic_set(&imr->num_leaf_free, 0);
atomic_set(&imr->num_pending_prefetch, 0);
+ smp_store_release(&imr->live, 1);
return imr;
}
@@ -555,15 +587,19 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
if (mr->parent != imr)
continue;
+ mutex_lock(&umem_odp->umem_mutex);
ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
ib_umem_end(umem_odp));
- if (umem_odp->dying)
+ if (umem_odp->dying) {
+ mutex_unlock(&umem_odp->umem_mutex);
continue;
+ }
- WRITE_ONCE(umem_odp->dying, 1);
+ umem_odp->dying = 1;
atomic_inc(&imr->num_leaf_free);
schedule_work(&umem_odp->work);
+ mutex_unlock(&umem_odp->umem_mutex);
}
up_read(&per_mm->umem_rwsem);
@@ -773,7 +809,7 @@ next_mr:
switch (mmkey->type) {
case MLX5_MKEY_MR:
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- if (!mr->live || !mr->ibmr.pd) {
+ if (!smp_load_acquire(&mr->live) || !mr->ibmr.pd) {
mlx5_ib_dbg(dev, "got dead MR\n");
ret = -EFAULT;
goto srcu_unlock;
@@ -1641,12 +1677,12 @@ static bool num_pending_prefetch_inc(struct ib_pd *pd,
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- if (mr->ibmr.pd != pd) {
+ if (!smp_load_acquire(&mr->live)) {
ret = false;
break;
}
- if (!mr->live) {
+ if (mr->ibmr.pd != pd) {
ret = false;
break;
}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index 6cac0c88cf39..36cdfbdbd325 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -230,8 +230,6 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
pvrdma_page_dir_cleanup(dev, &srq->pdir);
- kfree(srq);
-
atomic_dec(&dev->num_srqs);
}
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index 430314c8abd9..52d402f39df9 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -182,12 +182,19 @@ void siw_qp_llp_close(struct siw_qp *qp)
*/
void siw_qp_llp_write_space(struct sock *sk)
{
- struct siw_cep *cep = sk_to_cep(sk);
+ struct siw_cep *cep;
- cep->sk_write_space(sk);
+ read_lock(&sk->sk_callback_lock);
+
+ cep = sk_to_cep(sk);
+ if (cep) {
+ cep->sk_write_space(sk);
- if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
- (void)siw_sq_start(cep->qp);
+ if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
+ (void)siw_sq_start(cep->qp);
+ }
+
+ read_unlock(&sk->sk_callback_lock);
}
static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
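
The write-space callback now runs under read_lock(&sk->sk_callback_lock) and tolerates a NULL context pointer, closing the race with teardown clearing the socket's user data. A kernel-style sketch of the guarded-callback shape (ctx struct and names are illustrative; siw's sk_to_cep() is modelled by sk_user_data):

#include <net/sock.h>

struct example_ctx {
	void (*saved_write_space)(struct sock *sk);
};

static void example_write_space(struct sock *sk)
{
	struct example_ctx *ctx;

	read_lock(&sk->sk_callback_lock);
	ctx = sk->sk_user_data;		/* NULL once teardown has run */
	if (ctx)
		ctx->saved_write_space(sk);
	read_unlock(&sk->sk_callback_lock);
}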
diff --git a/drivers/input/misc/da9063_onkey.c b/drivers/input/misc/da9063_onkey.c
index dace8577fa43..79851923ee57 100644
--- a/drivers/input/misc/da9063_onkey.c
+++ b/drivers/input/misc/da9063_onkey.c
@@ -232,10 +232,7 @@ static int da9063_onkey_probe(struct platform_device *pdev)
onkey->input->phys = onkey->phys;
onkey->input->dev.parent = &pdev->dev;
- if (onkey->key_power)
- input_set_capability(onkey->input, EV_KEY, KEY_POWER);
-
- input_set_capability(onkey->input, EV_KEY, KEY_SLEEP);
+ input_set_capability(onkey->input, EV_KEY, KEY_POWER);
INIT_DELAYED_WORK(&onkey->work, da9063_poll_on);
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index 97e3639e99d0..08520b3a18b8 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -92,11 +92,18 @@ soc_button_device_create(struct platform_device *pdev,
continue;
gpio = soc_button_lookup_gpio(&pdev->dev, info->acpi_index);
- if (gpio < 0 && gpio != -ENOENT) {
- error = gpio;
- goto err_free_mem;
- } else if (!gpio_is_valid(gpio)) {
- /* Skip GPIO if not present */
+ if (!gpio_is_valid(gpio)) {
+ /*
+ * Skip GPIO if not present. Note we deliberately
+ * ignore -EPROBE_DEFER errors here. On some devices
+ * Intel uses so-called virtual GPIOs, which are not
+ * GPIOs at all but a way for AML code to check some
+ * random status bits without needing a custom opregion.
+ * In some cases the resources table we parse points to
+ * such a virtual GPIO; since these are not real GPIOs
+ * we have no driver for them, so they will never
+ * show up, which is why we ignore -EPROBE_DEFER.
+ */
continue;
}
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 04fe43440a3c..2d8434b7b623 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1827,31 +1827,6 @@ static int elantech_create_smbus(struct psmouse *psmouse,
leave_breadcrumbs);
}
-static bool elantech_use_host_notify(struct psmouse *psmouse,
- struct elantech_device_info *info)
-{
- if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
- return true;
-
- switch (info->bus) {
- case ETP_BUS_PS2_ONLY:
- /* expected case */
- break;
- case ETP_BUS_SMB_HST_NTFY_ONLY:
- case ETP_BUS_PS2_SMB_HST_NTFY:
- /* SMbus implementation is stable since 2018 */
- if (dmi_get_bios_year() >= 2018)
- return true;
- /* fall through */
- default:
- psmouse_dbg(psmouse,
- "Ignoring SMBus bus provider %d\n", info->bus);
- break;
- }
-
- return false;
-}
-
/**
* elantech_setup_smbus - called once the PS/2 devices are enumerated
* and decides to instantiate a SMBus InterTouch device.
@@ -1871,7 +1846,7 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
* i2c_blacklist_pnp_ids.
* Old ICs are up to the user to decide.
*/
- if (!elantech_use_host_notify(psmouse, info) ||
+ if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
return -ENXIO;
}
@@ -1891,6 +1866,34 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
return 0;
}
+static bool elantech_use_host_notify(struct psmouse *psmouse,
+ struct elantech_device_info *info)
+{
+ if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
+ return true;
+
+ switch (info->bus) {
+ case ETP_BUS_PS2_ONLY:
+ /* expected case */
+ break;
+ case ETP_BUS_SMB_ALERT_ONLY:
+ /* fall-through */
+ case ETP_BUS_PS2_SMB_ALERT:
+ psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
+ break;
+ case ETP_BUS_SMB_HST_NTFY_ONLY:
+ /* fall-through */
+ case ETP_BUS_PS2_SMB_HST_NTFY:
+ return true;
+ default:
+ psmouse_dbg(psmouse,
+ "Ignoring SMBus bus provider %d.\n",
+ info->bus);
+ }
+
+ return false;
+}
+
int elantech_init_smbus(struct psmouse *psmouse)
{
struct elantech_device_info info;
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 772493b1f665..190b9974526b 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -146,7 +146,7 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
}
mutex_lock(&data->irq_mutex);
- bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask,
+ bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits,
data->irq_count);
/*
* At this point, irq_status has all bits that are set in the
@@ -385,6 +385,8 @@ static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
bitmap_copy(data->current_irq_mask, data->new_irq_mask,
data->num_of_irq_regs);
+ bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count);
+
error_unlock:
mutex_unlock(&data->irq_mutex);
return error;
@@ -398,6 +400,8 @@ static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
struct device *dev = &rmi_dev->dev;
mutex_lock(&data->irq_mutex);
+ bitmap_andnot(data->fn_irq_bits,
+ data->fn_irq_bits, mask, data->irq_count);
bitmap_andnot(data->new_irq_mask,
data->current_irq_mask, mask, data->irq_count);
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 5178ea8b5f30..fb43aa708660 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -53,6 +53,7 @@ struct goodix_ts_data {
const char *cfg_name;
struct completion firmware_loading_complete;
unsigned long irq_flags;
+ unsigned int contact_size;
};
#define GOODIX_GPIO_INT_NAME "irq"
@@ -62,6 +63,7 @@ struct goodix_ts_data {
#define GOODIX_MAX_WIDTH 4096
#define GOODIX_INT_TRIGGER 1
#define GOODIX_CONTACT_SIZE 8
+#define GOODIX_MAX_CONTACT_SIZE 9
#define GOODIX_MAX_CONTACTS 10
#define GOODIX_CONFIG_MAX_LENGTH 240
@@ -144,6 +146,19 @@ static const struct dmi_system_id rotated_screen[] = {
{}
};
+static const struct dmi_system_id nine_bytes_report[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+ {
+ .ident = "Lenovo YogaBook",
+ /* YB1-X91L/F and YB1-X90L/F */
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9")
+ }
+ },
+#endif
+ {}
+};
+
/**
* goodix_i2c_read - read data from a register of the i2c slave device.
*
@@ -249,7 +264,7 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
max_timeout = jiffies + msecs_to_jiffies(GOODIX_BUFFER_STATUS_TIMEOUT);
do {
error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR,
- data, GOODIX_CONTACT_SIZE + 1);
+ data, ts->contact_size + 1);
if (error) {
dev_err(&ts->client->dev, "I2C transfer error: %d\n",
error);
@@ -262,12 +277,12 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
return -EPROTO;
if (touch_num > 1) {
- data += 1 + GOODIX_CONTACT_SIZE;
+ data += 1 + ts->contact_size;
error = goodix_i2c_read(ts->client,
GOODIX_READ_COOR_ADDR +
- 1 + GOODIX_CONTACT_SIZE,
+ 1 + ts->contact_size,
data,
- GOODIX_CONTACT_SIZE *
+ ts->contact_size *
(touch_num - 1));
if (error)
return error;
@@ -286,7 +301,7 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
return 0;
}
-static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
+static void goodix_ts_report_touch_8b(struct goodix_ts_data *ts, u8 *coor_data)
{
int id = coor_data[0] & 0x0F;
int input_x = get_unaligned_le16(&coor_data[1]);
@@ -301,6 +316,21 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w);
}
+static void goodix_ts_report_touch_9b(struct goodix_ts_data *ts, u8 *coor_data)
+{
+ int id = coor_data[1] & 0x0F;
+ int input_x = get_unaligned_le16(&coor_data[3]);
+ int input_y = get_unaligned_le16(&coor_data[5]);
+ int input_w = get_unaligned_le16(&coor_data[7]);
+
+ input_mt_slot(ts->input_dev, id);
+ input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true);
+ touchscreen_report_pos(ts->input_dev, &ts->prop,
+ input_x, input_y, true);
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, input_w);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w);
+}
+
/**
* goodix_process_events - Process incoming events
*
@@ -311,7 +341,7 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
*/
static void goodix_process_events(struct goodix_ts_data *ts)
{
- u8 point_data[1 + GOODIX_CONTACT_SIZE * GOODIX_MAX_CONTACTS];
+ u8 point_data[1 + GOODIX_MAX_CONTACT_SIZE * GOODIX_MAX_CONTACTS];
int touch_num;
int i;
@@ -326,8 +356,12 @@ static void goodix_process_events(struct goodix_ts_data *ts)
input_report_key(ts->input_dev, KEY_LEFTMETA, point_data[0] & BIT(4));
for (i = 0; i < touch_num; i++)
- goodix_ts_report_touch(ts,
- &point_data[1 + GOODIX_CONTACT_SIZE * i]);
+ if (ts->contact_size == 9)
+ goodix_ts_report_touch_9b(ts,
+ &point_data[1 + ts->contact_size * i]);
+ else
+ goodix_ts_report_touch_8b(ts,
+ &point_data[1 + ts->contact_size * i]);
input_mt_sync_frame(ts->input_dev);
input_sync(ts->input_dev);
@@ -730,6 +764,13 @@ static int goodix_configure_dev(struct goodix_ts_data *ts)
"Applying '180 degrees rotated screen' quirk\n");
}
+ if (dmi_check_system(nine_bytes_report)) {
+ ts->contact_size = 9;
+
+ dev_dbg(&ts->client->dev,
+ "Non-standard 9-bytes report format quirk\n");
+ }
+
error = input_mt_init_slots(ts->input_dev, ts->max_touch_num,
INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
if (error) {
@@ -810,6 +851,7 @@ static int goodix_ts_probe(struct i2c_client *client,
ts->client = client;
i2c_set_clientdata(client, ts);
init_completion(&ts->firmware_loading_complete);
+ ts->contact_size = GOODIX_CONTACT_SIZE;
error = goodix_get_gpio_config(ts);
if (error)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 2369b8af81f3..dd555078258c 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -583,7 +583,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
retry:
type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
- pasid = PPR_PASID(*(u64 *)&event[0]);
+ pasid = (event[0] & EVENT_DOMID_MASK_HI) |
+ (event[1] & EVENT_DOMID_MASK_LO);
flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
address = (u64)(((u64)event[3]) << 32) | event[2];
@@ -616,7 +617,7 @@ retry:
address, flags);
break;
case EVENT_TYPE_PAGE_TAB_ERR:
- dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
+ dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
break;
@@ -1463,6 +1464,7 @@ static void free_pagetable(struct protection_domain *domain)
* to 64 bits.
*/
static bool increase_address_space(struct protection_domain *domain,
+ unsigned long address,
gfp_t gfp)
{
unsigned long flags;
@@ -1471,8 +1473,8 @@ static bool increase_address_space(struct protection_domain *domain,
spin_lock_irqsave(&domain->lock, flags);
- if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
- /* address space already 64 bit large */
+ if (address <= PM_LEVEL_SIZE(domain->mode) ||
+ WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
goto out;
pte = (void *)get_zeroed_page(gfp);
@@ -1505,7 +1507,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
BUG_ON(!is_power_of_2(page_size));
while (address > PM_LEVEL_SIZE(domain->mode))
- *updated = increase_address_space(domain, gfp) || *updated;
+ *updated = increase_address_space(domain, address, gfp) || *updated;
level = domain->mode - 1;
pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
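
increase_address_space() now re-tests address <= PM_LEVEL_SIZE(domain->mode) after taking domain->lock, so two racing faults cannot both grow the page-table height. A runnable pthread analogue of the check-then-recheck-under-lock idiom (level_size() and the constants are illustrative):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int mode;			/* current table height */
#define MAX_MODE 6

static unsigned long level_size(int m) { return 1UL << (12 + 9 * m); }

static void grow_if_needed(unsigned long address)
{
	/* Unlocked read mirrors the kernel's opportunistic check. */
	while (address > level_size(mode)) {
		pthread_mutex_lock(&lock);
		/* Recheck: another thread may have grown it already. */
		if (address <= level_size(mode) || mode == MAX_MODE) {
			pthread_mutex_unlock(&lock);
			break;
		}
		mode++;			/* actually add a level */
		pthread_mutex_unlock(&lock);
	}
}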
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index c9c1612d52e0..17bd5a349119 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -130,8 +130,8 @@
#define EVENT_TYPE_INV_PPR_REQ 0x9
#define EVENT_DEVID_MASK 0xffff
#define EVENT_DEVID_SHIFT 0
-#define EVENT_DOMID_MASK 0xffff
-#define EVENT_DOMID_SHIFT 0
+#define EVENT_DOMID_MASK_LO 0xffff
+#define EVENT_DOMID_MASK_HI 0xf0000
#define EVENT_FLAGS_MASK 0xfff
#define EVENT_FLAGS_SHIFT 0x10
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index b18aac4c105e..7c503a6bc585 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -812,6 +812,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
return 0;
out_clear_smmu:
+ __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
smmu_domain->smmu = NULL;
out_unlock:
mutex_unlock(&smmu_domain->init_mutex);
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 4c91359057c5..ca51036aa53c 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -166,6 +166,9 @@
#define ARM_MALI_LPAE_TTBR_READ_INNER BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER BIT(4)
+#define ARM_MALI_LPAE_MEMATTR_IMP_DEF 0x88ULL
+#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL
+
/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
@@ -1015,27 +1018,56 @@ arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
- struct io_pgtable *iop;
+ struct arm_lpae_io_pgtable *data;
- if (cfg->ias != 48 || cfg->oas > 40)
+ /* No quirks for Mali (hopefully) */
+ if (cfg->quirks)
+ return NULL;
+
+ if (cfg->ias > 48 || cfg->oas > 40)
return NULL;
cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
- iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
- if (iop) {
- u64 mair, ttbr;
- /* Copy values as union fields overlap */
- mair = cfg->arm_lpae_s1_cfg.mair[0];
- ttbr = cfg->arm_lpae_s1_cfg.ttbr[0];
+ data = arm_lpae_alloc_pgtable(cfg);
+ if (!data)
+ return NULL;
- cfg->arm_mali_lpae_cfg.memattr = mair;
- cfg->arm_mali_lpae_cfg.transtab = ttbr |
- ARM_MALI_LPAE_TTBR_READ_INNER |
- ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
+ /* Mali seems to need a full 4-level table regardless of IAS */
+ if (data->levels < ARM_LPAE_MAX_LEVELS) {
+ data->levels = ARM_LPAE_MAX_LEVELS;
+ data->pgd_size = sizeof(arm_lpae_iopte);
}
+ /*
+ * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
+ * best we can do is mimic the out-of-tree driver and hope that the
+ * "implementation-defined caching policy" is good enough. Similarly,
+ * we'll use it for the sake of a valid attribute for our 'device'
+ * index, although callers should never request that in practice.
+ */
+ cfg->arm_mali_lpae_cfg.memattr =
+ (ARM_MALI_LPAE_MEMATTR_IMP_DEF
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
+ (ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
+ (ARM_MALI_LPAE_MEMATTR_IMP_DEF
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
- return iop;
+ data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+ if (!data->pgd)
+ goto out_free_data;
+
+ /* Ensure the empty pgd is visible before TRANSTAB can be written */
+ wmb();
+
+ cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
+ ARM_MALI_LPAE_TTBR_READ_INNER |
+ ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
+ return &data->iop;
+
+out_free_data:
+ kfree(data);
+ return NULL;
}
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 9da8309f7170..237103465b82 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1086,8 +1086,6 @@ static int ipmmu_probe(struct platform_device *pdev)
mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);
- irq = platform_get_irq(pdev, 0);
-
/*
* Determine if this IPMMU instance is a root device by checking for
* the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
@@ -1106,6 +1104,7 @@ static int ipmmu_probe(struct platform_device *pdev)
/* Root devices have mandatory IRQs */
if (ipmmu_is_root(mmu)) {
+ irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no IRQ found\n");
return irq;
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 26290f310f90..4dcbf68dfda4 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -100,6 +100,7 @@ struct rk_iommu {
struct device *dev;
void __iomem **bases;
int num_mmu;
+ int num_irq;
struct clk_bulk_data *clocks;
int num_clocks;
bool reset_disabled;
@@ -1136,7 +1137,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
struct rk_iommu *iommu;
struct resource *res;
int num_res = pdev->num_resources;
- int err, i, irq;
+ int err, i;
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
if (!iommu)
@@ -1163,6 +1164,10 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (iommu->num_mmu == 0)
return PTR_ERR(iommu->bases[0]);
+ iommu->num_irq = platform_irq_count(pdev);
+ if (iommu->num_irq < 0)
+ return iommu->num_irq;
+
iommu->reset_disabled = device_property_read_bool(dev,
"rockchip,disable-mmu-reset");
@@ -1219,8 +1224,9 @@ static int rk_iommu_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
- i = 0;
- while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
+ for (i = 0; i < iommu->num_irq; i++) {
+ int irq = platform_get_irq(pdev, i);
+
if (irq < 0)
return irq;
@@ -1245,10 +1251,13 @@ err_unprepare_clocks:
static void rk_iommu_shutdown(struct platform_device *pdev)
{
struct rk_iommu *iommu = platform_get_drvdata(pdev);
- int i = 0, irq;
+ int i;
+
+ for (i = 0; i < iommu->num_irq; i++) {
+ int irq = platform_get_irq(pdev, i);
- while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
devm_free_irq(iommu->dev, irq, iommu);
+ }
pm_runtime_force_suspend(&pdev->dev);
}
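
Rather than calling platform_get_irq() until it returns -ENXIO, the driver now asks platform_irq_count() up front and iterates a fixed range, which also lets shutdown walk exactly the IRQs that were requested. A hedged kernel-style sketch using those real APIs (handler and function names are illustrative):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq(int irq, void *cookie)
{
	return IRQ_HANDLED;
}

static int request_all_irqs(struct platform_device *pdev, void *cookie)
{
	int i, irq, err;
	int num_irq = platform_irq_count(pdev);	/* may be -EPROBE_DEFER */

	if (num_irq < 0)
		return num_irq;

	for (i = 0; i < num_irq; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			return irq;
		err = devm_request_irq(&pdev->dev, irq, example_irq, 0,
				       dev_name(&pdev->dev), cookie);
		if (err)
			return err;
	}
	return 0;
}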
diff --git a/drivers/irqchip/irq-al-fic.c b/drivers/irqchip/irq-al-fic.c
index 1a57cee3efab..0b0a73739756 100644
--- a/drivers/irqchip/irq-al-fic.c
+++ b/drivers/irqchip/irq-al-fic.c
@@ -15,6 +15,7 @@
/* FIC Registers */
#define AL_FIC_CAUSE 0x00
+#define AL_FIC_SET_CAUSE 0x08
#define AL_FIC_MASK 0x10
#define AL_FIC_CONTROL 0x28
@@ -126,6 +127,16 @@ static void al_fic_irq_handler(struct irq_desc *desc)
chained_irq_exit(irqchip, desc);
}
+static int al_fic_irq_retrigger(struct irq_data *data)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct al_fic *fic = gc->private;
+
+ writel_relaxed(BIT(data->hwirq), fic->base + AL_FIC_SET_CAUSE);
+
+ return 1;
+}
+
static int al_fic_register(struct device_node *node,
struct al_fic *fic)
{
@@ -159,6 +170,7 @@ static int al_fic_register(struct device_node *node,
gc->chip_types->chip.irq_unmask = irq_gc_mask_clr_bit;
gc->chip_types->chip.irq_ack = irq_gc_ack_clr_bit;
gc->chip_types->chip.irq_set_type = al_fic_irq_set_type;
+ gc->chip_types->chip.irq_retrigger = al_fic_irq_retrigger;
gc->chip_types->chip.flags = IRQCHIP_SKIP_SET_WAKE;
gc->private = fic;
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 6acad2ea0fb3..29333497ba10 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -313,6 +313,7 @@ static void __init sama5d3_aic_irq_fixup(void)
static const struct of_device_id aic5_irq_fixups[] __initconst = {
{ .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
{ .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup },
+ { .compatible = "microchip,sam9x60", .data = sama5d3_aic_irq_fixup },
{ /* sentinel */ },
};
@@ -390,3 +391,12 @@ static int __init sama5d4_aic5_of_init(struct device_node *node,
return aic5_of_init(node, parent, NR_SAMA5D4_IRQS);
}
IRQCHIP_DECLARE(sama5d4_aic5, "atmel,sama5d4-aic", sama5d4_aic5_of_init);
+
+#define NR_SAM9X60_IRQS 50
+
+static int __init sam9x60_aic5_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return aic5_of_init(node, parent, NR_SAM9X60_IRQS);
+}
+IRQCHIP_DECLARE(sam9x60_aic5, "microchip,sam9x60-aic", sam9x60_aic5_of_init);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 422664ac5f53..1edc99335a94 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -59,7 +59,7 @@ static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
-#define GIC_LINE_NR max(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
+#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
/*
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index c72c036aea76..daefc52b0ec5 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -97,7 +97,7 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
}
}
-static void plic_irq_enable(struct irq_data *d)
+static void plic_irq_unmask(struct irq_data *d)
{
unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
cpu_online_mask);
@@ -106,7 +106,7 @@ static void plic_irq_enable(struct irq_data *d)
plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
}
-static void plic_irq_disable(struct irq_data *d)
+static void plic_irq_mask(struct irq_data *d)
{
plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
}
@@ -125,10 +125,8 @@ static int plic_set_affinity(struct irq_data *d,
if (cpu >= nr_cpu_ids)
return -EINVAL;
- if (!irqd_irq_disabled(d)) {
- plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
- plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
- }
+ plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+ plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
@@ -136,14 +134,18 @@ static int plic_set_affinity(struct irq_data *d,
}
#endif
+static void plic_irq_eoi(struct irq_data *d)
+{
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+}
+
static struct irq_chip plic_chip = {
.name = "SiFive PLIC",
- /*
- * There is no need to mask/unmask PLIC interrupts. They are "masked"
- * by reading claim and "unmasked" when writing it back.
- */
- .irq_enable = plic_irq_enable,
- .irq_disable = plic_irq_disable,
+ .irq_mask = plic_irq_mask,
+ .irq_unmask = plic_irq_unmask,
+ .irq_eoi = plic_irq_eoi,
#ifdef CONFIG_SMP
.irq_set_affinity = plic_set_affinity,
#endif
@@ -152,7 +154,7 @@ static struct irq_chip plic_chip = {
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
- irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq);
+ irq_set_chip_and_handler(irq, &plic_chip, handle_fasteoi_irq);
irq_set_chip_data(irq, NULL);
irq_set_noprobe(irq);
return 0;
@@ -188,7 +190,6 @@ static void plic_handle_irq(struct pt_regs *regs)
hwirq);
else
generic_handle_irq(irq);
- writel(hwirq, claim);
}
csr_set(sie, SIE_SEIE);
}
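
[Editor's note] The PLIC conversion above drops handle_simple_irq with irq_enable/irq_disable in favour of the fasteoi flow: the claim register is written back from an irq_eoi callback after the handler completes, and mask/unmask manipulate the per-context enable bits. A reduced sketch of the wiring, assuming hypothetical mask/unmask helpers and a hypothetical claim-register mapping:

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/io.h>

static void __iomem *foo_claim_reg;	/* hypothetical claim register */

static void foo_irq_mask(struct irq_data *d)   { /* clear enable bit */ }
static void foo_irq_unmask(struct irq_data *d) { /* set enable bit */ }

static void foo_irq_eoi(struct irq_data *d)
{
	/* completing the interrupt re-arms it at the controller */
	writel(d->hwirq, foo_claim_reg);
}

static struct irq_chip foo_chip = {
	.name		= "foo",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
	.irq_eoi	= foo_irq_eoi,
};

static int foo_irqdomain_map(struct irq_domain *d, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &foo_chip, handle_fasteoi_irq);
	return 0;
}
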
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index d249cf8ac277..8346e6d1816c 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -542,7 +542,7 @@ static void wake_migration_worker(struct cache *cache)
static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
{
- return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOWAIT);
+ return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
}
static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
@@ -554,9 +554,7 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
{
struct dm_cache_migration *mg;
- mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT);
- if (!mg)
- return NULL;
+ mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
memset(mg, 0, sizeof(*mg));
@@ -664,10 +662,6 @@ static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bi
struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;
cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
- if (!cell_prealloc) {
- defer_bio(cache, bio);
- return false;
- }
build_key(oblock, end, &key);
r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
@@ -1493,11 +1487,6 @@ static int mg_lock_writes(struct dm_cache_migration *mg)
struct dm_bio_prison_cell_v2 *prealloc;
prealloc = alloc_prison_cell(cache);
- if (!prealloc) {
- DMERR_LIMIT("%s: alloc_prison_cell failed", cache_device_name(cache));
- mg_complete(mg, false);
- return -ENOMEM;
- }
/*
* Prevent writes to the block, but allow reads to continue.
@@ -1535,11 +1524,6 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio
}
mg = alloc_migration(cache);
- if (!mg) {
- policy_complete_background_work(cache->policy, op, false);
- background_work_end(cache);
- return -ENOMEM;
- }
mg->op = op;
mg->overwrite_bio = bio;
@@ -1628,10 +1612,6 @@ static int invalidate_lock(struct dm_cache_migration *mg)
struct dm_bio_prison_cell_v2 *prealloc;
prealloc = alloc_prison_cell(cache);
- if (!prealloc) {
- invalidate_complete(mg, false);
- return -ENOMEM;
- }
build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
r = dm_cell_lock_v2(cache->prison, &key,
@@ -1669,10 +1649,6 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
return -EPERM;
mg = alloc_migration(cache);
- if (!mg) {
- background_work_end(cache);
- return -ENOMEM;
- }
mg->overwrite_bio = bio;
mg->invalidate_cblock = cblock;
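
[Editor's note] The dm-cache hunks rely on a mempool guarantee: mempool_alloc() with a sleeping gfp mask such as GFP_NOIO never returns NULL — it blocks until an element is returned to the pool — which is why every "!mg" / "!prealloc" error branch could simply be deleted. A minimal illustration (pool and wrapper are hypothetical):

#include <linux/mempool.h>

static void *foo_alloc(mempool_t *pool)
{
	/*
	 * GFP_NOIO may sleep but cannot fail for a mempool: if the
	 * backing allocator is exhausted, the caller waits for a
	 * previously allocated element to be freed back to the pool.
	 * GFP_NOWAIT, by contrast, can legitimately return NULL.
	 */
	return mempool_alloc(pool, GFP_NOIO);
}
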
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index cd6f9e9fc98e..4ca8f1977222 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -591,8 +591,8 @@ static struct hash_table_bucket *get_hash_table_bucket(struct clone *clone,
*
* NOTE: Must be called with the bucket lock held
*/
-struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
- unsigned long region_nr)
+static struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
+ unsigned long region_nr)
{
struct dm_clone_region_hydration *hd;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index f150f5c5492b..4fb1a40e68a0 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -18,7 +18,6 @@
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
-#include <linux/semaphore.h>
#include "dm.h"
@@ -107,8 +106,8 @@ struct dm_snapshot {
/* The on disk metadata handler */
struct dm_exception_store *store;
- /* Maximum number of in-flight COW jobs. */
- struct semaphore cow_count;
+ unsigned in_progress;
+ struct wait_queue_head in_progress_wait;
struct dm_kcopyd_client *kcopyd_client;
@@ -162,8 +161,8 @@ struct dm_snapshot {
*/
#define DEFAULT_COW_THRESHOLD 2048
-static int cow_threshold = DEFAULT_COW_THRESHOLD;
-module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
+static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
+module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
@@ -1327,7 +1326,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_hash_tables;
}
- sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
+ init_waitqueue_head(&s->in_progress_wait);
s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(s->kcopyd_client)) {
@@ -1509,9 +1508,56 @@ static void snapshot_dtr(struct dm_target *ti)
dm_put_device(ti, s->origin);
+ WARN_ON(s->in_progress);
+
kfree(s);
}
+static void account_start_copy(struct dm_snapshot *s)
+{
+ spin_lock(&s->in_progress_wait.lock);
+ s->in_progress++;
+ spin_unlock(&s->in_progress_wait.lock);
+}
+
+static void account_end_copy(struct dm_snapshot *s)
+{
+ spin_lock(&s->in_progress_wait.lock);
+ BUG_ON(!s->in_progress);
+ s->in_progress--;
+ if (likely(s->in_progress <= cow_threshold) &&
+ unlikely(waitqueue_active(&s->in_progress_wait)))
+ wake_up_locked(&s->in_progress_wait);
+ spin_unlock(&s->in_progress_wait.lock);
+}
+
+static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
+{
+ if (unlikely(s->in_progress > cow_threshold)) {
+ spin_lock(&s->in_progress_wait.lock);
+ if (likely(s->in_progress > cow_threshold)) {
+ /*
+ * NOTE: this throttle doesn't account for whether
+ * the caller is servicing an IO that will trigger a COW
+ * so excess throttling may result for chunks not required
+ * to be COW'd. But if cow_threshold was reached, extra
+ * throttling is unlikely to negatively impact performance.
+ */
+ DECLARE_WAITQUEUE(wait, current);
+ __add_wait_queue(&s->in_progress_wait, &wait);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_unlock(&s->in_progress_wait.lock);
+ if (unlock_origins)
+ up_read(&_origins_lock);
+ io_schedule();
+ remove_wait_queue(&s->in_progress_wait, &wait);
+ return false;
+ }
+ spin_unlock(&s->in_progress_wait.lock);
+ }
+ return true;
+}
+
/*
* Flush a list of buffers.
*/
@@ -1527,7 +1573,7 @@ static void flush_bios(struct bio *bio)
}
}
-static int do_origin(struct dm_dev *origin, struct bio *bio);
+static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
/*
* Flush a list of buffers.
@@ -1540,7 +1586,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
- r = do_origin(s->origin, bio);
+ r = do_origin(s->origin, bio, false);
if (r == DM_MAPIO_REMAPPED)
generic_make_request(bio);
bio = n;
@@ -1732,7 +1778,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
rb_link_node(&pe->out_of_order_node, parent, p);
rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
}
- up(&s->cow_count);
+ account_end_copy(s);
}
/*
@@ -1756,7 +1802,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
dest.count = src.count;
/* Hand over to kcopyd */
- down(&s->cow_count);
+ account_start_copy(s);
dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
@@ -1776,7 +1822,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
pe->full_bio = bio;
pe->full_bio_end_io = bio->bi_end_io;
- down(&s->cow_count);
+ account_start_copy(s);
callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
copy_callback, pe);
@@ -1866,7 +1912,7 @@ static void zero_callback(int read_err, unsigned long write_err, void *context)
struct bio *bio = context;
struct dm_snapshot *s = bio->bi_private;
- up(&s->cow_count);
+ account_end_copy(s);
bio->bi_status = write_err ? BLK_STS_IOERR : 0;
bio_endio(bio);
}
@@ -1880,7 +1926,7 @@ static void zero_exception(struct dm_snapshot *s, struct dm_exception *e,
dest.sector = bio->bi_iter.bi_sector;
dest.count = s->store->chunk_size;
- down(&s->cow_count);
+ account_start_copy(s);
WARN_ON_ONCE(bio->bi_private);
bio->bi_private = s;
dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
@@ -1916,6 +1962,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (!s->valid)
return DM_MAPIO_KILL;
+ if (bio_data_dir(bio) == WRITE) {
+ while (unlikely(!wait_for_in_progress(s, false)))
+ ; /* wait_for_in_progress() has slept */
+ }
+
down_read(&s->lock);
dm_exception_table_lock(&lock);
@@ -2112,7 +2163,7 @@ redirect_to_origin:
if (bio_data_dir(bio) == WRITE) {
up_write(&s->lock);
- return do_origin(s->origin, bio);
+ return do_origin(s->origin, bio, false);
}
out_unlock:
@@ -2487,15 +2538,24 @@ next_snapshot:
/*
* Called on a write from the origin driver.
*/
-static int do_origin(struct dm_dev *origin, struct bio *bio)
+static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
{
struct origin *o;
int r = DM_MAPIO_REMAPPED;
+again:
down_read(&_origins_lock);
o = __lookup_origin(origin->bdev);
- if (o)
+ if (o) {
+ if (limit) {
+ struct dm_snapshot *s;
+ list_for_each_entry(s, &o->snapshots, list)
+ if (unlikely(!wait_for_in_progress(s, true)))
+ goto again;
+ }
+
r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
+ }
up_read(&_origins_lock);
return r;
@@ -2608,7 +2668,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
dm_accept_partial_bio(bio, available_sectors);
/* Only tell snapshots if this is a write */
- return do_origin(o->dev, bio);
+ return do_origin(o->dev, bio, true);
}
/*
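
[Editor's note] The dm-snap rework above replaces the counting semaphore with a plain counter protected by the wait queue's own spinlock, so a throttled writer can drop _origins_lock before sleeping and then retry. The core of the pattern, reduced to a hypothetical throttle object (retry loop and threshold rechecking omitted for brevity):

#include <linux/wait.h>
#include <linux/sched.h>

struct foo_throttle {
	struct wait_queue_head wait;
	unsigned int in_progress;
	unsigned int threshold;
};

static void throttle_get(struct foo_throttle *t)
{
	spin_lock(&t->wait.lock);
	t->in_progress++;
	spin_unlock(&t->wait.lock);
}

static void throttle_put(struct foo_throttle *t)
{
	spin_lock(&t->wait.lock);
	t->in_progress--;
	if (t->in_progress <= t->threshold && waitqueue_active(&t->wait))
		wake_up_locked(&t->wait);	/* lock already held */
	spin_unlock(&t->wait.lock);
}

static void throttle_wait(struct foo_throttle *t)
{
	DECLARE_WAITQUEUE(wait, current);

	spin_lock(&t->wait.lock);
	__add_wait_queue(&t->wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock(&t->wait.lock);
	io_schedule();			/* caller may retry afterwards */
	remove_wait_queue(&t->wait, &wait);
}
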
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index f61693e59684..1e772287b1c8 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -154,7 +154,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
} else {
pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
mdname(mddev));
- pr_err("md/raid0: please set raid.default_layout to 1 or 2\n");
+ pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
err = -ENOTSUPP;
goto abort;
}
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index cfca3c70599b..21f90a887485 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -643,8 +643,7 @@ static int v4l_stk_release(struct file *fp)
dev->owner = NULL;
}
- if (is_present(dev))
- usb_autopm_put_interface(dev->interface);
+ usb_autopm_put_interface(dev->interface);
mutex_unlock(&dev->lock);
return v4l2_fh_release(fp);
}
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index 32747425297d..64fff6abe60e 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -941,7 +941,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev,
if (!cnt) {
rc = -ENODEV;
pci_dev_busy = 1;
- goto err_out;
+ goto err_out_int;
}
jm = kzalloc(sizeof(struct jmb38x_ms)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 47ae84afac2e..1b1a794d639d 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -527,6 +527,7 @@ static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
FASTRPC_PHYS(buffer->phys), buffer->size);
if (ret < 0) {
dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
+ kfree(a);
return -EINVAL;
}
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 32e9b1aed2ca..0a2b99e1af45 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -218,13 +218,21 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
{
int ret;
+ /* No need to enable the client if nothing is needed from it */
+ if (!cldev->bus->fw_f_fw_ver_supported &&
+ !cldev->bus->hbm_f_os_supported)
+ return;
+
ret = mei_cldev_enable(cldev);
if (ret)
return;
- ret = mei_fwver(cldev);
- if (ret < 0)
- dev_err(&cldev->dev, "FW version command failed %d\n", ret);
+ if (cldev->bus->fw_f_fw_ver_supported) {
+ ret = mei_fwver(cldev);
+ if (ret < 0)
+ dev_err(&cldev->dev, "FW version command failed %d\n",
+ ret);
+ }
if (cldev->bus->hbm_f_os_supported) {
ret = mei_osver(cldev);
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 77f7dff7098d..c09f8bb49495 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -79,6 +79,9 @@
#define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */
#define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */
+#define MEI_DEV_ID_CMP_LP 0x02e0 /* Comet Point LP */
+#define MEI_DEV_ID_CMP_LP_3 0x02e4 /* Comet Point LP 3 (iTouch) */
+
#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
#define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index abe1b1f4362f..c4f6991d3028 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1355,6 +1355,8 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
#define MEI_CFG_FW_SPS \
.quirk_probe = mei_me_fw_type_sps
+#define MEI_CFG_FW_VER_SUPP \
+ .fw_ver_supported = 1
#define MEI_CFG_ICH_HFS \
.fw_status.count = 0
@@ -1392,31 +1394,41 @@ static const struct mei_cfg mei_me_ich10_cfg = {
MEI_CFG_ICH10_HFS,
};
-/* PCH devices */
-static const struct mei_cfg mei_me_pch_cfg = {
+/* PCH6 devices */
+static const struct mei_cfg mei_me_pch6_cfg = {
MEI_CFG_PCH_HFS,
};
+/* PCH7 devices */
+static const struct mei_cfg mei_me_pch7_cfg = {
+ MEI_CFG_PCH_HFS,
+ MEI_CFG_FW_VER_SUPP,
+};
+
/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
MEI_CFG_PCH_HFS,
+ MEI_CFG_FW_VER_SUPP,
MEI_CFG_FW_NM,
};
/* PCH8 Lynx Point and newer devices */
static const struct mei_cfg mei_me_pch8_cfg = {
MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
};
/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
static const struct mei_cfg mei_me_pch8_sps_cfg = {
MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
MEI_CFG_FW_SPS,
};
/* Cannon Lake and newer devices */
static const struct mei_cfg mei_me_pch12_cfg = {
MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
MEI_CFG_DMA_128,
};
@@ -1428,7 +1440,8 @@ static const struct mei_cfg *const mei_cfg_list[] = {
[MEI_ME_UNDEF_CFG] = NULL,
[MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
[MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
- [MEI_ME_PCH_CFG] = &mei_me_pch_cfg,
+ [MEI_ME_PCH6_CFG] = &mei_me_pch6_cfg,
+ [MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
[MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
@@ -1473,6 +1486,8 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
hw->cfg = cfg;
+ dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
+
return dev;
}
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 08c84a0de4a8..1d8794828cbc 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -20,11 +20,13 @@
* @fw_status: FW status
* @quirk_probe: device exclusion quirk
* @dma_size: device DMA buffers size
+ * @fw_ver_supported: is fw version retrievable from FW
*/
struct mei_cfg {
const struct mei_fw_status fw_status;
bool (*quirk_probe)(struct pci_dev *pdev);
size_t dma_size[DMA_DSCR_NUM];
+ u32 fw_ver_supported:1;
};
@@ -62,7 +64,8 @@ struct mei_me_hw {
* @MEI_ME_UNDEF_CFG: Lower sentinel.
* @MEI_ME_ICH_CFG: I/O Controller Hub legacy devices.
* @MEI_ME_ICH10_CFG: I/O Controller Hub platforms Gen10
- * @MEI_ME_PCH_CFG: Platform Controller Hub platforms (Up to Gen8).
+ * @MEI_ME_PCH6_CFG: Platform Controller Hub platforms (Gen6).
+ * @MEI_ME_PCH7_CFG: Platform Controller Hub platforms (Gen7).
* @MEI_ME_PCH_CPT_PBG_CFG:Platform Controller Hub workstations
* with quirk for Node Manager exclusion.
* @MEI_ME_PCH8_CFG: Platform Controller Hub Gen8 and newer
@@ -77,7 +80,8 @@ enum mei_cfg_idx {
MEI_ME_UNDEF_CFG,
MEI_ME_ICH_CFG,
MEI_ME_ICH10_CFG,
- MEI_ME_PCH_CFG,
+ MEI_ME_PCH6_CFG,
+ MEI_ME_PCH7_CFG,
MEI_ME_PCH_CPT_PBG_CFG,
MEI_ME_PCH8_CFG,
MEI_ME_PCH8_SPS_CFG,
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index f71a023aed3c..0f2141178299 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -426,6 +426,8 @@ struct mei_fw_version {
*
* @fw_ver : FW versions
*
+ * @fw_f_fw_ver_supported : fw feature: fw version supported
+ *
* @me_clients_rwsem: rw lock over me_clients list
* @me_clients : list of FW clients
* @me_clients_map : FW clients bit map
@@ -506,6 +508,8 @@ struct mei_device {
struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS];
+ unsigned int fw_f_fw_ver_supported:1;
+
struct rw_semaphore me_clients_rwsem;
struct list_head me_clients;
DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index d5a92c6eadb3..3dca63eddaa0 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -61,13 +61,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, MEI_ME_ICH10_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, MEI_ME_ICH10_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH6_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH6_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, MEI_ME_PCH_CPT_PBG_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, MEI_ME_PCH_CPT_PBG_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
@@ -96,6 +96,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
+
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},
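
[Editor's note] The mei changes thread a one-bit fw_ver_supported flag from the per-generation mei_cfg into the device, and the bus fixup only issues the FW-version command when the flag is set. A reduced sketch of this per-generation feature-flag idiom (all "foo" names hypothetical):

struct foo_cfg {
	u32 has_feature_x:1;	/* set per hardware generation */
};

static const struct foo_cfg foo_gen7_cfg = { .has_feature_x = 1 };

struct foo_dev {
	unsigned int feature_x_supported:1;
};

static void foo_init(struct foo_dev *dev, const struct foo_cfg *cfg)
{
	dev->feature_x_supported = cfg->has_feature_x;
}

static void foo_maybe_query(struct foo_dev *dev)
{
	if (!dev->feature_x_supported)
		return;		/* skip commands older firmware rejects */
	/* issue the optional command here */
}
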
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index d4ada5cca2d1..234551a68739 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -646,8 +646,8 @@ int renesas_sdhi_probe(struct platform_device *pdev,
struct tmio_mmc_dma *dma_priv;
struct tmio_mmc_host *host;
struct renesas_sdhi *priv;
+ int num_irqs, irq, ret, i;
struct resource *res;
- int irq, ret, i;
u16 ver;
of_data = of_device_get_match_data(&pdev->dev);
@@ -825,24 +825,31 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->hs400_complete = renesas_sdhi_hs400_complete;
}
- i = 0;
- while (1) {
+ num_irqs = platform_irq_count(pdev);
+ if (num_irqs < 0) {
+ ret = num_irqs;
+ goto eirq;
+ }
+
+ /* There must be at least one IRQ source */
+ if (!num_irqs) {
+ ret = -ENXIO;
+ goto eirq;
+ }
+
+ for (i = 0; i < num_irqs; i++) {
irq = platform_get_irq(pdev, i);
- if (irq < 0)
- break;
- i++;
+ if (irq < 0) {
+ ret = irq;
+ goto eirq;
+ }
+
ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
dev_name(&pdev->dev), host);
if (ret)
goto eirq;
}
- /* There must be at least one IRQ source */
- if (!i) {
- ret = irq;
- goto eirq;
- }
-
dev_info(&pdev->dev, "%s base at 0x%08lx max clock rate %u MHz\n",
mmc_hostname(host->mmc), (unsigned long)
(platform_get_resource(pdev, IORESOURCE_MEM, 0)->start),
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 2b9cdcd1dd9d..f4f5f0a70cda 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -262,6 +262,7 @@ static const struct sdhci_iproc_data bcm2835_data = {
};
static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = {
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.ops = &sdhci_iproc_32only_ops,
};
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 81bd9afb0980..98c575de43c7 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1393,11 +1393,9 @@ static int sh_mmcif_probe(struct platform_device *pdev)
const char *name;
irq[0] = platform_get_irq(pdev, 0);
- irq[1] = platform_get_irq(pdev, 1);
- if (irq[0] < 0) {
- dev_err(dev, "Get irq error\n");
+ irq[1] = platform_get_irq_optional(pdev, 1);
+ if (irq[0] < 0)
return -ENXIO;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(dev, res);
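
[Editor's note] sh_mmcif switches the second interrupt to platform_get_irq_optional(), which behaves like platform_get_irq() but stays silent when the IRQ is simply absent — appropriate for a genuinely optional resource. A short sketch of the split between mandatory and optional IRQs (names illustrative):

	int irq_main, irq_aux;

	irq_main = platform_get_irq(pdev, 0);	/* mandatory; logs on failure */
	if (irq_main < 0)
		return irq_main;

	irq_aux = platform_get_irq_optional(pdev, 1);	/* no error spam if missing */
	if (irq_aux >= 0) {
		/* wire up the auxiliary handler only when present */
	}
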
diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c
index 97a97a9ccc36..e10b76089048 100644
--- a/drivers/mtd/nand/raw/au1550nd.c
+++ b/drivers/mtd/nand/raw/au1550nd.c
@@ -134,16 +134,15 @@ static void au_write_buf16(struct nand_chip *this, const u_char *buf, int len)
/**
* au_read_buf16 - read chip data into buffer
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: buffer to store data
* @len: number of bytes to read
*
* read function for 16bit buswidth
*/
-static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
+static void au_read_buf16(struct nand_chip *this, u_char *buf, int len)
{
int i;
- struct nand_chip *this = mtd_to_nand(mtd);
u16 *p = (u16 *) buf;
len >>= 1;
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 1d8621d43160..7acf4a93b592 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -487,7 +487,7 @@ static int write_sr(struct spi_nor *nor, u8 val)
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+ SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
return spi_mem_exec_op(nor->spimem, &op);
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 931d9d935686..21d8fcc83c9c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4039,7 +4039,7 @@ out:
* this to-be-skipped slave to send a packet out.
*/
old_arr = rtnl_dereference(bond->slave_arr);
- for (idx = 0; idx < old_arr->count; idx++) {
+ for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) {
if (skipslave == old_arr->arr[idx]) {
old_arr->arr[idx] =
old_arr->arr[old_arr->count-1];
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 526ba2ab66f1..cc3536315eff 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1845,7 +1845,6 @@ int b53_mirror_add(struct dsa_switch *ds, int port,
loc = B53_EG_MIR_CTL;
b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
- reg &= ~MIRROR_MASK;
reg |= BIT(port);
b53_write16(dev, B53_MGMT_PAGE, loc, reg);
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index a23d3ffdf0c4..24a5e99f7fd5 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -1224,10 +1224,6 @@ static int ksz8795_switch_init(struct ksz_device *dev)
{
int i;
- mutex_init(&dev->stats_mutex);
- mutex_init(&dev->alu_mutex);
- mutex_init(&dev->vlan_mutex);
-
dev->ds->ops = &ksz8795_switch_ops;
for (i = 0; i < ARRAY_SIZE(ksz8795_switch_chips); i++) {
diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c
index d0f8153e86b7..8b00f8e6c02f 100644
--- a/drivers/net/dsa/microchip/ksz8795_spi.c
+++ b/drivers/net/dsa/microchip/ksz8795_spi.c
@@ -25,6 +25,7 @@ KSZ_REGMAP_TABLE(ksz8795, 16, SPI_ADDR_SHIFT,
static int ksz8795_spi_probe(struct spi_device *spi)
{
+ struct regmap_config rc;
struct ksz_device *dev;
int i, ret;
@@ -33,9 +34,9 @@ static int ksz8795_spi_probe(struct spi_device *spi)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
- dev->regmap[i] = devm_regmap_init_spi(spi,
- &ksz8795_regmap_config
- [i]);
+ rc = ksz8795_regmap_config[i];
+ rc.lock_arg = &dev->regmap_mutex;
+ dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
if (IS_ERR(dev->regmap[i])) {
ret = PTR_ERR(dev->regmap[i]);
dev_err(&spi->dev,
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index 0b1e01f0873d..fdffd9e0c518 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -17,6 +17,7 @@ KSZ_REGMAP_TABLE(ksz9477, not_used, 16, 0, 0);
static int ksz9477_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *i2c_id)
{
+ struct regmap_config rc;
struct ksz_device *dev;
int i, ret;
@@ -25,8 +26,9 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c,
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) {
- dev->regmap[i] = devm_regmap_init_i2c(i2c,
- &ksz9477_regmap_config[i]);
+ rc = ksz9477_regmap_config[i];
+ rc.lock_arg = &dev->regmap_mutex;
+ dev->regmap[i] = devm_regmap_init_i2c(i2c, &rc);
if (IS_ERR(dev->regmap[i])) {
ret = PTR_ERR(dev->regmap[i]);
dev_err(&i2c->dev,
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index 2938e892b631..16939f29faa5 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Microchip KSZ9477 register definitions
*
* Copyright (C) 2017-2018 Microchip Technology Inc.
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index f4198d6f72be..c5f64959a184 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -24,6 +24,7 @@ KSZ_REGMAP_TABLE(ksz9477, 32, SPI_ADDR_SHIFT,
static int ksz9477_spi_probe(struct spi_device *spi)
{
+ struct regmap_config rc;
struct ksz_device *dev;
int i, ret;
@@ -32,8 +33,9 @@ static int ksz9477_spi_probe(struct spi_device *spi)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) {
- dev->regmap[i] = devm_regmap_init_spi(spi,
- &ksz9477_regmap_config[i]);
+ rc = ksz9477_regmap_config[i];
+ rc.lock_arg = &dev->regmap_mutex;
+ dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
if (IS_ERR(dev->regmap[i])) {
ret = PTR_ERR(dev->regmap[i]);
dev_err(&spi->dev,
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index b0b870f0c252..fe47180c908b 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -436,7 +436,7 @@ int ksz_switch_register(struct ksz_device *dev,
}
mutex_init(&dev->dev_mutex);
- mutex_init(&dev->stats_mutex);
+ mutex_init(&dev->regmap_mutex);
mutex_init(&dev->alu_mutex);
mutex_init(&dev->vlan_mutex);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index dd60d0837fc6..a20ebb749377 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Microchip switch driver common header
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip switch driver common header
*
* Copyright (C) 2017-2019 Microchip Technology Inc.
*/
@@ -47,7 +47,7 @@ struct ksz_device {
const char *name;
struct mutex dev_mutex; /* device access */
- struct mutex stats_mutex; /* status access */
+ struct mutex regmap_mutex; /* regmap access */
struct mutex alu_mutex; /* ALU access */
struct mutex vlan_mutex; /* vlan access */
const struct ksz_dev_ops *dev_ops;
@@ -290,6 +290,18 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
+static inline void ksz_regmap_lock(void *__mtx)
+{
+ struct mutex *mtx = __mtx;
+ mutex_lock(mtx);
+}
+
+static inline void ksz_regmap_unlock(void *__mtx)
+{
+ struct mutex *mtx = __mtx;
+ mutex_unlock(mtx);
+}
+
/* Regmap tables generation */
#define KSZ_SPI_OP_RD 3
#define KSZ_SPI_OP_WR 2
@@ -314,6 +326,8 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
.write_flag_mask = \
KSZ_SPI_OP_FLAG_MASK(KSZ_SPI_OP_WR, swp, \
regbits, regpad), \
+ .lock = ksz_regmap_lock, \
+ .unlock = ksz_regmap_unlock, \
.reg_format_endian = REGMAP_ENDIAN_BIG, \
.val_format_endian = REGMAP_ENDIAN_BIG \
}
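
[Editor's note] The ksz hunks above route every regmap of a device through one driver-owned mutex by supplying custom .lock/.unlock callbacks and a shared .lock_arg, instead of letting each regmap create its own internal lock. In the patch the callbacks are wired in via the KSZ_REGMAP_TABLE macro; the sketch below sets them at probe time instead, with hypothetical "foo" names:

#include <linux/regmap.h>
#include <linux/spi/spi.h>

struct foo_device {
	struct mutex regmap_mutex;	/* serializes all regmap accesses */
	struct regmap *regmap[3];
};

static void foo_regmap_lock(void *mtx)   { mutex_lock(mtx); }
static void foo_regmap_unlock(void *mtx) { mutex_unlock(mtx); }

static int foo_init_regmaps(struct spi_device *spi, struct foo_device *dev,
			    const struct regmap_config *tmpl, int n)
{
	struct regmap_config rc;
	int i;

	mutex_init(&dev->regmap_mutex);
	for (i = 0; i < n; i++) {
		rc = tmpl[i];		/* copy: the template is const */
		rc.lock = foo_regmap_lock;
		rc.unlock = foo_regmap_unlock;
		rc.lock_arg = &dev->regmap_mutex;
		dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
		if (IS_ERR(dev->regmap[i]))
			return PTR_ERR(dev->regmap[i]);
	}
	return 0;
}
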
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index e53e494c22e0..fbb564c3beb8 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _SJA1105_H
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
index 740dadf43f01..1fc0d13dc623 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _SJA1105_DYNAMIC_CONFIG_H
#define _SJA1105_DYNAMIC_CONFIG_H
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index af456b0a4d27..394e12a6ad59 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _SJA1105_PTP_H
#define _SJA1105_PTP_H
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h
index 7f87022a2d61..f4a5c5c04311 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.h
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2016-2018, NXP Semiconductors
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2016-2018, NXP Semiconductors
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _SJA1105_STATIC_CONFIG_H
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.h b/drivers/net/dsa/sja1105/sja1105_tas.h
index 0b803c30e640..0aad212d88b2 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.h
+++ b/drivers/net/dsa/sja1105/sja1105_tas.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _SJA1105_TAS_H
#define _SJA1105_TAS_H
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index b4a0fb281e69..bb65dd39f847 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -194,9 +194,7 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- aq_nic_set_packet_filter(aq_nic, ndev->flags);
-
- aq_nic_set_multicast_list(aq_nic, ndev);
+ (void)aq_nic_set_multicast_list(aq_nic, ndev);
}
static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 8f66e7817811..137c1de4c6ec 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -631,9 +631,12 @@ err_exit:
int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
- unsigned int packet_filter = self->packet_filter;
+ const struct aq_hw_ops *hw_ops = self->aq_hw_ops;
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ unsigned int packet_filter = ndev->flags;
struct netdev_hw_addr *ha = NULL;
unsigned int i = 0U;
+ int err = 0;
self->mc_list.count = 0;
if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
@@ -641,29 +644,28 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
} else {
netdev_for_each_uc_addr(ha, ndev) {
ether_addr_copy(self->mc_list.ar[i++], ha->addr);
-
- if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
- break;
}
}
- if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
- packet_filter |= IFF_ALLMULTI;
- } else {
- netdev_for_each_mc_addr(ha, ndev) {
- ether_addr_copy(self->mc_list.ar[i++], ha->addr);
-
- if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
- break;
+ cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST);
+ if (cfg->is_mc_list_enabled) {
+ if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+ packet_filter |= IFF_ALLMULTI;
+ } else {
+ netdev_for_each_mc_addr(ha, ndev) {
+ ether_addr_copy(self->mc_list.ar[i++],
+ ha->addr);
+ }
}
}
if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
- packet_filter |= IFF_MULTICAST;
self->mc_list.count = i;
- self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
- self->mc_list.ar,
- self->mc_list.count);
+ err = hw_ops->hw_multicast_list_set(self->aq_hw,
+ self->mc_list.ar,
+ self->mc_list.count);
+ if (err < 0)
+ return err;
}
return aq_nic_set_packet_filter(self, packet_filter);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 3901d7994ca1..76bdbe1596d6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -313,6 +313,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
break;
buff->is_error |= buff_->is_error;
+ buff->is_cso_err |= buff_->is_cso_err;
} while (!buff_->is_eop);
@@ -320,7 +321,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
err = 0;
goto err_exit;
}
- if (buff->is_error) {
+ if (buff->is_error || buff->is_cso_err) {
buff_ = buff;
do {
next_ = buff_->next,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 30f7fc4c97ff..2ad3fa6316ce 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -818,14 +818,15 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
cfg->is_vlan_force_promisc);
hw_atl_rpfl2multicast_flr_en_set(self,
- IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
+ IS_FILTER_ENABLED(IFF_ALLMULTI) &&
+ IS_FILTER_ENABLED(IFF_MULTICAST), 0);
hw_atl_rpfl2_accept_all_mc_packets_set(self,
- IS_FILTER_ENABLED(IFF_ALLMULTI));
+ IS_FILTER_ENABLED(IFF_ALLMULTI) &&
+ IS_FILTER_ENABLED(IFF_MULTICAST));
hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
- cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
hw_atl_rpfl2_uc_flr_en_set(self,
@@ -968,14 +969,26 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
+ int err;
+ u32 val;
+
hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
/* Invalidate Descriptor Cache to prevent writing to the cached
* descriptors and to the data pointer of those descriptors
*/
- hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
+ hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);
- return aq_hw_err_from_flags(self);
+ err = aq_hw_err_from_flags(self);
+
+ if (err)
+ goto err_exit;
+
+ readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
+ self, val, val == 1, 1000U, 10000U);
+
+err_exit:
+ return err;
}
static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 1149812ae463..6f340695e6bd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -606,12 +606,25 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode
HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
}
-void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init)
+void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw)
{
+ u32 val;
+
+ val = aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
+ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
+ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT);
+
aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT,
- init);
+ val ^ 1);
+}
+
+u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR,
+ RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK,
+ RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT);
}
void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 0c37abbabca5..c3ee278c3747 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -313,8 +313,11 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_pkt_buff_size_per_tc,
u32 buffer);
-/* set rdm rx dma descriptor cache init */
-void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init);
+/* toggle rdm rx dma descriptor cache init */
+void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw);
+
+/* get rdm rx dma descriptor cache init done */
+u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw);
/* set rx xoff enable (per tc) */
void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index c3febcdfa92e..35887ad89025 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -318,6 +318,25 @@
/* default value of bitfield rdm_desc_init_i */
#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0
+/* rdm_desc_init_done_i bitfield definitions
+ * preprocessor definitions for the bitfield rdm_desc_init_done_i.
+ * port="pif_rdm_desc_init_done_i"
+ */
+
+/* register address for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR 0x00005a10
+/* bitmask for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK 0x00000001U
+/* inverted bitmask for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSKN 0xfffffffe
+/* lower bit position of bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT 0U
+/* width of bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_WIDTH 1
+/* default value of bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_DEFAULT 0x0
+
+
/* rx int_desc_wrb_en bitfield definitions
* preprocessor definitions for the bitfield "int_desc_wrb_en".
* port="pif_rdm_int_desc_wrb_en_i"
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index da726489e3c8..7bc51f8d6f2f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -337,7 +337,7 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
/* Convert PHY temperature from 1/256 degree Celsius
* to 1/1000 degree Celsius.
*/
- *temp = temp_res * 1000 / 256;
+ *temp = (temp_res & 0xFFFF) * 1000 / 256;
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index e24f5d2b6afe..53055ce5dfd6 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -8,7 +8,6 @@ config NET_VENDOR_BROADCOM
default y
depends on (SSB_POSSIBLE && HAS_DMA) || PCI || BCM63XX || \
SIBYTE_SB1xxx_SOC
- select DIMLIB
---help---
If you have a network (Ethernet) chipset belonging to this class,
say Y.
@@ -69,6 +68,7 @@ config BCMGENET
select FIXED_PHY
select BCM7XXX_PHY
select MDIO_BCM_UNIMAC
+ select DIMLIB
help
This driver supports the built-in Ethernet MACs found in the
Broadcom BCM7xxx Set Top Box family chipset.
@@ -188,6 +188,7 @@ config SYSTEMPORT
select MII
select PHYLIB
select FIXED_PHY
+ select DIMLIB
help
This driver supports the built-in Ethernet MACs found in the
Broadcom BCM7xxx Set Top Box family chipset using an internal
@@ -200,6 +201,7 @@ config BNXT
select LIBCRC32C
select NET_DEVLINK
select PAGE_POOL
+ select DIMLIB
---help---
This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
Ethernet cards. To compile this driver as a module, choose M here:
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 12cb77ef1081..0f138280315a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2018,6 +2018,8 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
*/
if (priv->internal_phy) {
int0_enable |= UMAC_IRQ_LINK_EVENT;
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
+ int0_enable |= UMAC_IRQ_PHY_DET_R;
} else if (priv->ext_phy) {
int0_enable |= UMAC_IRQ_LINK_EVENT;
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
@@ -2611,11 +2613,14 @@ static void bcmgenet_irq_task(struct work_struct *work)
priv->irq0_stat = 0;
spin_unlock_irq(&priv->lock);
+ if (status & UMAC_IRQ_PHY_DET_R &&
+ priv->dev->phydev->autoneg != AUTONEG_ENABLE)
+ phy_init_hw(priv->dev->phydev);
+
/* Link UP/DOWN event */
- if (status & UMAC_IRQ_LINK_EVENT) {
- priv->dev->phydev->link = !!(status & UMAC_IRQ_LINK_UP);
+ if (status & UMAC_IRQ_LINK_EVENT)
phy_mac_interrupt(priv->dev->phydev);
- }
+
}
/* bcmgenet_isr1: handle Rx and Tx priority queues */
@@ -2710,7 +2715,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
}
/* all other interested interrupts handled in bottom half */
- status &= UMAC_IRQ_LINK_EVENT;
+ status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
if (status) {
/* Save irq status for bottom-half processing. */
spin_lock_irqsave(&priv->lock, flags);
@@ -2874,6 +2879,12 @@ static int bcmgenet_open(struct net_device *dev)
if (priv->internal_phy)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+ ret = bcmgenet_mii_connect(dev);
+ if (ret) {
+ netdev_err(dev, "failed to connect to PHY\n");
+ goto err_clk_disable;
+ }
+
/* take MAC out of reset */
bcmgenet_umac_reset(priv);
@@ -2883,6 +2894,12 @@ static int bcmgenet_open(struct net_device *dev)
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
+ ret = bcmgenet_mii_config(dev, true);
+ if (ret) {
+ netdev_err(dev, "unsupported PHY\n");
+ goto err_disconnect_phy;
+ }
+
bcmgenet_set_hw_addr(priv, dev->dev_addr);
if (priv->internal_phy) {
@@ -2898,7 +2915,7 @@ static int bcmgenet_open(struct net_device *dev)
ret = bcmgenet_init_dma(priv);
if (ret) {
netdev_err(dev, "failed to initialize DMA\n");
- goto err_clk_disable;
+ goto err_disconnect_phy;
}
/* Always enable ring 16 - descriptor ring */
@@ -2921,25 +2938,19 @@ static int bcmgenet_open(struct net_device *dev)
goto err_irq0;
}
- ret = bcmgenet_mii_probe(dev);
- if (ret) {
- netdev_err(dev, "failed to connect to PHY\n");
- goto err_irq1;
- }
-
bcmgenet_netif_start(dev);
netif_tx_start_all_queues(dev);
return 0;
-err_irq1:
- free_irq(priv->irq1, priv);
err_irq0:
free_irq(priv->irq0, priv);
err_fini_dma:
bcmgenet_dma_teardown(priv);
bcmgenet_fini_dma(priv);
+err_disconnect_phy:
+ phy_disconnect(dev->phydev);
err_clk_disable:
if (priv->internal_phy)
bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
@@ -3620,6 +3631,8 @@ static int bcmgenet_resume(struct device *d)
if (priv->internal_phy)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+ phy_init_hw(dev->phydev);
+
bcmgenet_umac_reset(priv);
init_umac(priv);
@@ -3628,8 +3641,6 @@ static int bcmgenet_resume(struct device *d)
if (priv->wolopts)
clk_disable_unprepare(priv->clk_wol);
- phy_init_hw(dev->phydev);
-
/* Speed settings must be restored */
bcmgenet_mii_config(priv->dev, false);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 4a8fc03d82fd..7fbf573d8d52 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -366,6 +366,7 @@ struct bcmgenet_mib_counters {
#define EXT_PWR_DOWN_PHY_EN (1 << 20)
#define EXT_RGMII_OOB_CTRL 0x0C
+#define RGMII_MODE_EN_V123 (1 << 0)
#define RGMII_LINK (1 << 4)
#define OOB_DISABLE (1 << 5)
#define RGMII_MODE_EN (1 << 6)
@@ -719,8 +720,8 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
/* MDIO routines */
int bcmgenet_mii_init(struct net_device *dev);
+int bcmgenet_mii_connect(struct net_device *dev);
int bcmgenet_mii_config(struct net_device *dev, bool init);
-int bcmgenet_mii_probe(struct net_device *dev);
void bcmgenet_mii_exit(struct net_device *dev);
void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
void bcmgenet_mii_setup(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 970e478a9017..17bb8d60a157 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -173,6 +173,46 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
bcmgenet_fixed_phy_link_update);
}
+int bcmgenet_mii_connect(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device_node *dn = priv->pdev->dev.of_node;
+ struct phy_device *phydev;
+ u32 phy_flags = 0;
+ int ret;
+
+ /* Communicate the integrated PHY revision */
+ if (priv->internal_phy)
+ phy_flags = priv->gphy_rev;
+
+ /* Initialize link state variables that bcmgenet_mii_setup() uses */
+ priv->old_link = -1;
+ priv->old_speed = -1;
+ priv->old_duplex = -1;
+ priv->old_pause = -1;
+
+ if (dn) {
+ phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
+ phy_flags, priv->phy_interface);
+ if (!phydev) {
+ pr_err("could not attach to PHY\n");
+ return -ENODEV;
+ }
+ } else {
+ phydev = dev->phydev;
+ phydev->dev_flags = phy_flags;
+
+ ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
+ priv->phy_interface);
+ if (ret) {
+ pr_err("could not attach to PHY\n");
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
int bcmgenet_mii_config(struct net_device *dev, bool init)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -258,74 +298,29 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
*/
if (priv->ext_phy) {
reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
- reg |= RGMII_MODE_EN | id_mode_dis;
+ reg |= id_mode_dis;
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
+ reg |= RGMII_MODE_EN_V123;
+ else
+ reg |= RGMII_MODE_EN;
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
}
- if (init)
- dev_info(kdev, "configuring instance for %s\n", phy_name);
-
- return 0;
-}
-
-int bcmgenet_mii_probe(struct net_device *dev)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
- struct device_node *dn = priv->pdev->dev.of_node;
- struct phy_device *phydev;
- u32 phy_flags;
- int ret;
-
- /* Communicate the integrated PHY revision */
- phy_flags = priv->gphy_rev;
-
- /* Initialize link state variables that bcmgenet_mii_setup() uses */
- priv->old_link = -1;
- priv->old_speed = -1;
- priv->old_duplex = -1;
- priv->old_pause = -1;
-
- if (dn) {
- phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
- phy_flags, priv->phy_interface);
- if (!phydev) {
- pr_err("could not attach to PHY\n");
- return -ENODEV;
- }
- } else {
- phydev = dev->phydev;
- phydev->dev_flags = phy_flags;
+ if (init) {
+ linkmode_copy(phydev->advertising, phydev->supported);
- ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
- priv->phy_interface);
- if (ret) {
- pr_err("could not attach to PHY\n");
- return -ENODEV;
- }
- }
+ /* The internal PHY has its link interrupts routed to the
+ * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
+ * that prevents the signaling of link UP interrupts when
+ * the link operates at 10Mbps, so fallback to polling for
+ * those versions of GENET.
+ */
+ if (priv->internal_phy && !GENET_IS_V5(priv))
+ phydev->irq = PHY_IGNORE_INTERRUPT;
- /* Configure port multiplexer based on what the probed PHY device since
- * reading the 'max-speed' property determines the maximum supported
- * PHY speed which is needed for bcmgenet_mii_config() to configure
- * things appropriately.
- */
- ret = bcmgenet_mii_config(dev, true);
- if (ret) {
- phy_disconnect(dev->phydev);
- return ret;
+ dev_info(kdev, "configuring instance for %s\n", phy_name);
}
- linkmode_copy(phydev->advertising, phydev->supported);
-
- /* The internal PHY has its link interrupts routed to the
- * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
- * that prevents the signaling of link UP interrupts when
- * the link operates at 10Mbps, so fallback to polling for
- * those versions of GENET.
- */
- if (priv->internal_phy && !GENET_IS_V5(priv))
- dev->phydev->irq = PHY_IGNORE_INTERRUPT;
-
return 0;
}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 8e8d557901a9..1e1b774e1953 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3405,17 +3405,17 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
return err;
}
- *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+ *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
if (IS_ERR(*tx_clk))
- *tx_clk = NULL;
+ return PTR_ERR(*tx_clk);
- *rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
+ *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
if (IS_ERR(*rx_clk))
- *rx_clk = NULL;
+ return PTR_ERR(*rx_clk);
- *tsu_clk = devm_clk_get(&pdev->dev, "tsu_clk");
+ *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
if (IS_ERR(*tsu_clk))
- *tsu_clk = NULL;
+ return PTR_ERR(*tsu_clk);
err = clk_prepare_enable(*pclk);
if (err) {
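
[Editor's note] macb_clk_init now uses devm_clk_get_optional(), which returns NULL (a valid no-op clock) when the clock is simply not described, yet still surfaces real errors such as -EPROBE_DEFER — previously those errors were silently squashed to NULL. A minimal sketch of the idiom (clock name illustrative):

	struct clk *tx_clk;
	int err;

	tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
	if (IS_ERR(tx_clk))
		return PTR_ERR(tx_clk);	/* e.g. -EPROBE_DEFER */

	/* the clk API accepts NULL: prepare/enable become no-ops */
	err = clk_prepare_enable(tx_clk);
	if (err)
		return err;
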
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.h b/drivers/net/ethernet/cavium/common/cavium_ptp.h
index be2bafc7beeb..a04eccbc78e8 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.h
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/* cavium_ptp.h - PTP 1588 clock on Cavium hardware
* Copyright (c) 2003-2015, 2017 Cavium, Inc.
*/
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 162d7d8fb295..19379bae0144 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1235,6 +1235,8 @@ static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
priv->rx_td_enabled = enable;
}
+static void update_tx_fqids(struct dpaa2_eth_priv *priv);
+
static int link_state_update(struct dpaa2_eth_priv *priv)
{
struct dpni_link_state state = {0};
@@ -1261,6 +1263,7 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
goto out;
if (state.up) {
+ update_tx_fqids(priv);
netif_carrier_on(priv->net_dev);
netif_tx_start_all_queues(priv->net_dev);
} else {
@@ -2533,6 +2536,47 @@ static int set_pause(struct dpaa2_eth_priv *priv)
return 0;
}
+static void update_tx_fqids(struct dpaa2_eth_priv *priv)
+{
+ struct dpni_queue_id qid = {0};
+ struct dpaa2_eth_fq *fq;
+ struct dpni_queue queue;
+ int i, j, err;
+
+ /* We only use Tx FQIDs for FQID-based enqueue, so check
+ * if DPNI version supports it before updating FQIDs
+ */
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
+ DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
+ return;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+ if (fq->type != DPAA2_TX_CONF_FQ)
+ continue;
+ for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX, j, fq->flowid,
+ &queue, &qid);
+ if (err)
+ goto out_err;
+
+ fq->tx_fqid[j] = qid.fqid;
+ if (fq->tx_fqid[j] == 0)
+ goto out_err;
+ }
+ }
+
+ priv->enqueue = dpaa2_eth_enqueue_fq;
+
+ return;
+
+out_err:
+ netdev_info(priv->net_dev,
+ "Error reading Tx FQID, fallback to QDID-based enqueue\n");
+ priv->enqueue = dpaa2_eth_enqueue_qd;
+}
+
/* Configure the DPNI object this interface is associated with */
static int setup_dpni(struct fsl_mc_device *ls_dev)
{
@@ -3306,6 +3350,9 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
link_state_update(netdev_priv(net_dev));
+ if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED)
+ set_mac_addr(netdev_priv(net_dev));
+
return IRQ_HANDLED;
}
@@ -3331,7 +3378,8 @@ static int setup_irqs(struct fsl_mc_device *ls_dev)
}
err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
- DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
+ DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
+ DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
if (err < 0) {
dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
goto free_irq;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index fd583911b6c0..ee0711d06b3a 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -133,9 +133,12 @@ int dpni_reset(struct fsl_mc_io *mc_io,
*/
#define DPNI_IRQ_INDEX 0
/**
- * IRQ event - indicates a change in link state
+ * IRQ events:
+ * indicates a change in link state
+ * indicates a change in endpoint
*/
#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
+#define DPNI_IRQ_EVENT_ENDPOINT_CHANGED 0x00000002
int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
u32 cmd_flags,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index c4b7bf851a28..75ccc1e7076b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -32,6 +32,8 @@
#define HNAE3_MOD_VERSION "1.0"
+#define HNAE3_MIN_VECTOR_NUM 2 /* first one for misc, another for IO */
+
/* Device IDs */
#define HNAE3_DEV_ID_GE 0xA220
#define HNAE3_DEV_ID_25GE 0xA221
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index fd7f94372ff0..e02e01bd9eff 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -906,6 +906,9 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+ /* The NIC's MSI-X vector count always equals the RoCE's. */
+ hdev->num_nic_msi = hdev->num_roce_msi;
+
/* PF should have NIC vectors and Roce vectors,
* NIC vectors are queued before Roce vectors.
*/
@@ -915,6 +918,15 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
hdev->num_msi =
hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+
+ hdev->num_nic_msi = hdev->num_msi;
+ }
+
+ if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
+ dev_err(&hdev->pdev->dev,
+ "Just %u msi resources, not enough for pf(min:2).\n",
+ hdev->num_nic_msi);
+ return -EINVAL;
}
return 0;
@@ -1507,6 +1519,10 @@ static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
kinfo->rss_size = min_t(u16, hdev->rss_size_max,
vport->alloc_tqps / hdev->tm_info.num_tc);
+ /* Ensure a one-to-one mapping between IRQ and queue by default */
+ kinfo->rss_size = min_t(u16, kinfo->rss_size,
+ (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
+
return 0;
}
@@ -2285,7 +2301,8 @@ static int hclge_init_msi(struct hclge_dev *hdev)
int vectors;
int i;
- vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+ vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
+ hdev->num_msi,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (vectors < 0) {
dev_err(&pdev->dev,
@@ -2300,6 +2317,7 @@ static int hclge_init_msi(struct hclge_dev *hdev)
hdev->num_msi = vectors;
hdev->num_msi_left = vectors;
+
hdev->base_msi_vector = pdev->irq;
hdev->roce_base_vector = hdev->base_msi_vector +
hdev->roce_base_msix_offset;
@@ -3903,6 +3921,7 @@ static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
int alloc = 0;
int i, j;
+ vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
vector_num = min(hdev->num_msi_left, vector_num);
for (j = 0; j < vector_num; j++) {
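The hclge changes above introduce num_nic_msi, reject setups below HNAE3_MIN_VECTOR_NUM, and feed that minimum straight into pci_alloc_irq_vectors() so a vector shortage fails the probe early rather than surfacing later during queue setup. A sketch of the allocation contract (foo_init_msi() is hypothetical):

#include <linux/pci.h>

#define FOO_MIN_VECTORS	2	/* one for misc events, one for IO */

static int foo_init_msi(struct pci_dev *pdev, unsigned int max_vectors)
{
	/* Returns the number of vectors granted (>= FOO_MIN_VECTORS)
	 * or a negative errno if even the minimum cannot be met. */
	return pci_alloc_irq_vectors(pdev, FOO_MIN_VECTORS, max_vectors,
				     PCI_IRQ_MSI | PCI_IRQ_MSIX);
}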
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 3e9574a9e22d..c3d56b872ed7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -763,6 +763,7 @@ struct hclge_dev {
u32 base_msi_vector;
u16 *vector_status;
int *vector_irq;
+ u16 num_nic_msi; /* Num of nic vectors for this PF */
u16 num_roce_msi; /* Num of roce vectors for this PF */
int roce_base_vector;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 9f0e35f27789..62399cc1c5a6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -537,9 +537,16 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
kinfo->rss_size = kinfo->req_rss_size;
} else if (kinfo->rss_size > max_rss_size ||
(!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
+ /* If the user has not set rss_size, clamp it against the valid
+ * MSI vector count so TQPs and IRQs keep a one-to-one mapping
+ * by default.
+ */
+ if (!kinfo->req_rss_size)
+ max_rss_size = min_t(u16, max_rss_size,
+ (hdev->num_nic_msi - 1) /
+ kinfo->num_tc);
+
/* Set to the maximum specification value (max_rss_size). */
- dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
- kinfo->rss_size, max_rss_size);
kinfo->rss_size = max_rss_size;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index e3090b3dab1d..7d7e712691b9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -411,6 +411,13 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
kinfo->tqp[i] = &hdev->htqp[i].q;
}
+ /* After initializing the maximum rss_size and the TQPs, adjust the
+ * default TQP number and RSS size to the actual vector count.
+ */
+ kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
+ kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc,
+ kinfo->rss_size);
+
return 0;
}
@@ -502,6 +509,7 @@ static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
int alloc = 0;
int i, j;
+ vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
vector_num = min(hdev->num_msi_left, vector_num);
for (j = 0; j < vector_num; j++) {
@@ -2246,13 +2254,14 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
int vectors;
int i;
- if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
+ if (hnae3_dev_roce_supported(hdev))
vectors = pci_alloc_irq_vectors(pdev,
hdev->roce_base_msix_offset + 1,
hdev->num_msi,
PCI_IRQ_MSIX);
else
- vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+ vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
+ hdev->num_msi,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (vectors < 0) {
@@ -2268,6 +2277,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
hdev->num_msi = vectors;
hdev->num_msi_left = vectors;
+
hdev->base_msi_vector = pdev->irq;
hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
@@ -2533,7 +2543,7 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
req = (struct hclgevf_query_res_cmd *)desc.data;
- if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
+ if (hnae3_dev_roce_supported(hdev)) {
hdev->roce_base_msix_offset =
hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
HCLGEVF_MSIX_OFT_ROCEE_M,
@@ -2542,6 +2552,9 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+ /* The NIC's MSI-X vector count always equals the RoCE's. */
+ hdev->num_nic_msix = hdev->num_roce_msix;
+
/* VF should have NIC vectors and Roce vectors, NIC vectors
* are queued before Roce vectors. The offset is fixed to 64.
*/
@@ -2551,6 +2564,15 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
hdev->num_msi =
hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+
+ hdev->num_nic_msix = hdev->num_msi;
+ }
+
+ if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
+ dev_err(&hdev->pdev->dev,
+ "Just %u msi resources, not enough for vf(min:2).\n",
+ hdev->num_nic_msix);
+ return -EINVAL;
}
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index bdde3afc286b..2b8d6bc6d224 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -270,6 +270,7 @@ struct hclgevf_dev {
u16 num_msi;
u16 num_msi_left;
u16 num_msi_used;
+ u16 num_nic_msix; /* Num of nic vectors for this VF */
u16 num_roce_msix; /* Num of roce vectors for this VF */
u16 roce_base_msix_offset;
int roce_base_vector;
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
index 211c5f74b4c8..aec7e98bcc85 100644
--- a/drivers/net/ethernet/i825xx/lasi_82596.c
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
@@ -96,6 +96,8 @@
#define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */
+#define LIB82596_DMA_ATTR DMA_ATTR_NON_CONSISTENT
+
#define DMA_WBACK(ndev, addr, len) \
do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
@@ -200,7 +202,7 @@ static int __exit lan_remove_chip(struct parisc_device *pdev)
unregister_netdev (dev);
dma_free_attrs(&pdev->dev, sizeof(struct i596_private), lp->dma,
- lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+ lp->dma_addr, LIB82596_DMA_ATTR);
free_netdev (dev);
return 0;
}
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index 1274ad24d6af..f9742af7f142 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -1065,7 +1065,7 @@ static int i82596_probe(struct net_device *dev)
dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma),
&lp->dma_addr, GFP_KERNEL,
- DMA_ATTR_NON_CONSISTENT);
+ LIB82596_DMA_ATTR);
if (!dma) {
printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
return -ENOMEM;
@@ -1087,7 +1087,7 @@ static int i82596_probe(struct net_device *dev)
i = register_netdev(dev);
if (i) {
dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma),
- dma, lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+ dma, lp->dma_addr, LIB82596_DMA_ATTR);
return i;
}
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 6eb6c2ff7f09..6436a98c5953 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -24,6 +24,8 @@
static const char sni_82596_string[] = "snirm_82596";
+#define LIB82596_DMA_ATTR 0
+
#define DMA_WBACK(priv, addr, len) do { } while (0)
#define DMA_INV(priv, addr, len) do { } while (0)
#define DMA_WBACK_INV(priv, addr, len) do { } while (0)
@@ -152,7 +154,7 @@ static int sni_82596_driver_remove(struct platform_device *pdev)
unregister_netdev(dev);
dma_free_attrs(dev->dev.parent, sizeof(struct i596_private), lp->dma,
- lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+ lp->dma_addr, LIB82596_DMA_ATTR);
iounmap(lp->ca);
iounmap(lp->mpu_port);
free_netdev (dev);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 2b073a3c0b84..f59d9a8e35e2 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2878,12 +2878,10 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
if (test_bit(0, &adapter->resetting) &&
adapter->reset_reason == VNIC_RESET_MOBILITY) {
- u64 val = (0xff000000) | scrq->hw_irq;
+ struct irq_desc *desc = irq_to_desc(scrq->irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
- rc = plpar_hcall_norets(H_EOI, val);
- if (rc)
- dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
- val, rc);
+ chip->irq_eoi(&desc->irq_data);
}
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
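The ibmvnic hunk above retires a hand-rolled H_EOI hypercall in favour of the interrupt chip's own EOI hook, which already encodes the platform-correct way to finish the interrupt. A generic sketch of that call path (foo_eoi() is hypothetical):

#include <linux/irq.h>
#include <linux/irqdesc.h>

static void foo_eoi(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_chip *chip = desc ? irq_desc_get_chip(desc) : NULL;

	/* Delegate the end-of-interrupt to the irq_chip rather than
	 * issuing a platform-specific call directly. */
	if (chip && chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}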
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index c61069340f4f..703adb96429e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -261,6 +261,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
ge_mode = 0;
switch (state->interface) {
case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
ge_mode = 1;
break;
case PHY_INTERFACE_MODE_REVMII:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 9231b39d18b2..c501bf2a0252 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -112,17 +112,11 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0};
struct xarray *mkeys = &dev->priv.mkey_table;
- struct mlx5_core_mkey *deleted_mkey;
unsigned long flags;
xa_lock_irqsave(mkeys, flags);
- deleted_mkey = __xa_erase(mkeys, mlx5_base_mkey(mkey->key));
+ __xa_erase(mkeys, mlx5_base_mkey(mkey->key));
xa_unlock_irqrestore(mkeys, flags);
- if (!deleted_mkey) {
- mlx5_core_dbg(dev, "failed xarray delete of mkey 0x%x\n",
- mlx5_base_mkey(mkey->key));
- return -ENOENT;
- }
MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 4187f2b112b8..e8b656075c6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -788,12 +788,10 @@ again:
* it means that all the previous stes are the same,
* if so, this rule is duplicated.
*/
- if (mlx5dr_ste_is_last_in_rule(nic_matcher,
- matched_ste->ste_chain_location)) {
- mlx5dr_info(dmn, "Duplicate rule inserted, aborting!!\n");
- return NULL;
- }
- return matched_ste;
+ if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
+ return matched_ste;
+
+ mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
}
if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 899450b28621..7c03b661ae7e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -99,6 +99,7 @@ static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
devlink = priv_to_devlink(mlxsw_sp->core);
in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
local_port);
+ skb_push(skb, ETH_HLEN);
devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);
consume_skb(skb);
}
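The mlxsw hunk above re-exposes the Ethernet header with skb_push(ETH_HLEN) before handing the skb to devlink_trap_report(), since the receive path had already pulled the MAC header while the trap consumer expects the frame from the MAC header onward. A minimal sketch of the hand-off shape (foo_report_frame() is hypothetical):

#include <linux/if_ether.h>
#include <linux/skbuff.h>

static void foo_report_frame(struct sk_buff *skb)
{
	skb_push(skb, ETH_HLEN);	/* undo the earlier MAC-header pull */
	/* ... report the skb to the consumer here ... */
	consume_skb(skb);
}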
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 141571e2ec11..544012a67221 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1356,9 +1356,6 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
- /* Reset the ethernet controller */
- __lpc_eth_reset(pldat);
-
/* then shut everything down to save power */
__lpc_eth_shutdown(pldat);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 812190e729c2..6a95b42a8d8c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -182,6 +182,8 @@ struct ionic_lif {
#define lif_to_txqcq(lif, i) ((lif)->txqcqs[i].qcq)
#define lif_to_rxqcq(lif, i) ((lif)->rxqcqs[i].qcq)
+#define lif_to_txstats(lif, i) ((lif)->txqcqs[i].stats->tx)
+#define lif_to_rxstats(lif, i) ((lif)->rxqcqs[i].stats->rx)
#define lif_to_txq(lif, i) (&lif_to_txqcq((lif), i)->q)
#define lif_to_rxq(lif, i) (&lif_to_txqcq((lif), i)->q)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index e2907884f843..03916b6d47f2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -117,7 +117,8 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
/* rx stats */
total += MAX_Q(lif) * IONIC_NUM_RX_STATS;
- if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_UP, lif->state) &&
+ test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
/* tx debug stats */
total += MAX_Q(lif) * (IONIC_NUM_DBG_CQ_STATS +
IONIC_NUM_TX_Q_STATS +
@@ -149,7 +150,8 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
*buf += ETH_GSTRING_LEN;
}
- if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_UP, lif->state) &&
+ test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
snprintf(*buf, ETH_GSTRING_LEN,
"txq_%d_%s",
@@ -187,7 +189,8 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
*buf += ETH_GSTRING_LEN;
}
- if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_UP, lif->state) &&
+ test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
snprintf(*buf, ETH_GSTRING_LEN,
"rxq_%d_cq_%s",
@@ -223,6 +226,8 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
{
struct ionic_lif_sw_stats lif_stats;
struct ionic_qcq *txqcq, *rxqcq;
+ struct ionic_tx_stats *txstats;
+ struct ionic_rx_stats *rxstats;
int i, q_num;
ionic_get_lif_stats(lif, &lif_stats);
@@ -233,15 +238,17 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
}
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
- txqcq = lif_to_txqcq(lif, q_num);
+ txstats = &lif_to_txstats(lif, q_num);
for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
- **buf = IONIC_READ_STAT64(&txqcq->stats->tx,
+ **buf = IONIC_READ_STAT64(txstats,
&ionic_tx_stats_desc[i]);
(*buf)++;
}
- if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_UP, lif->state) &&
+ test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ txqcq = lif_to_txqcq(lif, q_num);
for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
**buf = IONIC_READ_STAT64(&txqcq->q,
&ionic_txq_stats_desc[i]);
@@ -258,22 +265,24 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
(*buf)++;
}
for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
- **buf = txqcq->stats->tx.sg_cntr[i];
+ **buf = txstats->sg_cntr[i];
(*buf)++;
}
}
}
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
- rxqcq = lif_to_rxqcq(lif, q_num);
+ rxstats = &lif_to_rxstats(lif, q_num);
for (i = 0; i < IONIC_NUM_RX_STATS; i++) {
- **buf = IONIC_READ_STAT64(&rxqcq->stats->rx,
+ **buf = IONIC_READ_STAT64(rxstats,
&ionic_rx_stats_desc[i]);
(*buf)++;
}
- if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_UP, lif->state) &&
+ test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ rxqcq = lif_to_rxqcq(lif, q_num);
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
**buf = IONIC_READ_STAT64(&rxqcq->cq,
&ionic_dbg_cq_stats_desc[i]);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 74f81fe03810..350b0d949611 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4146,6 +4146,14 @@ static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
rtl_lock_config_regs(tp);
}
+static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu)
+{
+ if (mtu > ETH_DATA_LEN)
+ rtl_hw_jumbo_enable(tp);
+ else
+ rtl_hw_jumbo_disable(tp);
+}
+
DECLARE_RTL_COND(rtl_chipcmd_cond)
{
return RTL_R8(tp, ChipCmd) & CmdReset;
@@ -4442,11 +4450,6 @@ static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp,
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
-
- if (tp->dev->mtu <= ETH_DATA_LEN) {
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B |
- PCI_EXP_DEVCTL_NOSNOOP_EN);
- }
}
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
@@ -4462,9 +4465,6 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_disable_clock_request(tp);
}
@@ -4490,9 +4490,6 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
-
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
}
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
@@ -4503,9 +4500,6 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
/* Magic. */
RTL_W8(tp, DBG_REG, 0x20);
-
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
}
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
@@ -4611,9 +4605,6 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168e_1);
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_disable_clock_request(tp);
/* Reset tx FIFO pointer */
@@ -4636,9 +4627,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168e_2);
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
@@ -5485,6 +5473,8 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_set_rx_tx_desc_registers(tp);
rtl_lock_config_regs(tp);
+ rtl_jumbo_config(tp, tp->dev->mtu);
+
/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
RTL_R16(tp, CPlusCmd);
RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -5498,10 +5488,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
struct rtl8169_private *tp = netdev_priv(dev);
- if (new_mtu > ETH_DATA_LEN)
- rtl_hw_jumbo_enable(tp);
- else
- rtl_hw_jumbo_disable(tp);
+ rtl_jumbo_config(tp, new_mtu);
dev->mtu = new_mtu;
netdev_update_features(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index f97a4096f8fc..ddcc191febdb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -651,7 +651,8 @@ static void sun8i_dwmac_set_filter(struct mac_device_info *hw,
}
}
} else {
- netdev_info(dev, "Too many address, switching to promiscuous\n");
+ if (!(readl(ioaddr + EMAC_RX_FRM_FLT) & EMAC_FRM_FLT_RXALL))
+ netdev_info(dev, "Too many address, switching to promiscuous\n");
v = EMAC_FRM_FLT_RXALL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 2cb9c53f93b8..5a7b0aca1d31 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -448,7 +448,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
value |= GMAC_PACKET_FILTER_HPF;
/* Handle multiple unicast addresses */
- if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
+ if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
/* Switch to promiscuous mode if more unicast addresses
* than the hardware filter can hold are required
*/
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index 3f4f3132e16b..e436fa160c7d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -515,6 +515,7 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
if (!enable) {
val |= PPSCMDx(index, 0x5);
+ val |= PPSEN0;
writel(val, ioaddr + MAC_PPS_CONTROL);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c76a1336a451..3dfd04e0506a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2610,7 +2610,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
}
if (priv->hw->pcs)
- stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
/* set TX and RX rings length */
stmmac_set_rings_length(priv);
@@ -4742,8 +4742,10 @@ int stmmac_suspend(struct device *dev)
stmmac_mac_set(priv, priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
- clk_disable(priv->plat->pclk);
- clk_disable(priv->plat->stmmac_clk);
+ if (priv->plat->clk_ptp_ref)
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
+ clk_disable_unprepare(priv->plat->pclk);
+ clk_disable_unprepare(priv->plat->stmmac_clk);
}
mutex_unlock(&priv->lock);
@@ -4806,8 +4808,10 @@ int stmmac_resume(struct device *dev)
} else {
pinctrl_pm_select_default_state(priv->device);
/* enable the clk previously disabled */
- clk_enable(priv->plat->stmmac_clk);
- clk_enable(priv->plat->pclk);
+ clk_prepare_enable(priv->plat->stmmac_clk);
+ clk_prepare_enable(priv->plat->pclk);
+ if (priv->plat->clk_ptp_ref)
+ clk_prepare_enable(priv->plat->clk_ptp_ref);
/* reset the phy so that it's ready */
if (priv->mii)
stmmac_mdio_reset(priv->mii);
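The stmmac suspend/resume hunks above switch from clk_enable()/clk_disable() to the prepare-aware pair and add the optional PTP reference clock, keeping prepare counts balanced across a suspend cycle. A sketch of the symmetric pattern, assuming a hypothetical foo_clks holder:

#include <linux/clk.h>

struct foo_clks {
	struct clk *pclk;
	struct clk *ptp_ref;	/* optional, may be NULL */
};

static void foo_suspend_clks(struct foo_clks *c)
{
	/* The clk API accepts NULL as a no-op handle, so the optional
	 * clock needs no separate existence check here. */
	clk_disable_unprepare(c->ptp_ref);
	clk_disable_unprepare(c->pclk);
}

static int foo_resume_clks(struct foo_clks *c)
{
	int err = clk_prepare_enable(c->pclk);

	if (err)
		return err;

	err = clk_prepare_enable(c->ptp_ref);
	if (err)
		clk_disable_unprepare(c->pclk);	/* unwind on failure */
	return err;
}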
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 173493db038c..df638b18b72c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -164,7 +164,7 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
/* structure describing a PTP hardware clock */
static struct ptp_clock_info stmmac_ptp_clock_ops = {
.owner = THIS_MODULE,
- .name = "stmmac_ptp_clock",
+ .name = "stmmac ptp",
.max_adj = 62500000,
.n_alarm = 0,
.n_ext_ts = 0,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index cc76a42c7466..e4ac3c401432 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -487,8 +487,8 @@ static int stmmac_filter_check(struct stmmac_priv *priv)
static int stmmac_test_hfilt(struct stmmac_priv *priv)
{
- unsigned char gd_addr[ETH_ALEN] = {0x01, 0x00, 0xcc, 0xcc, 0xdd, 0xdd};
- unsigned char bd_addr[ETH_ALEN] = {0x09, 0x00, 0xaa, 0xaa, 0xbb, 0xbb};
+ unsigned char gd_addr[ETH_ALEN] = {0x01, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
+ unsigned char bd_addr[ETH_ALEN] = {0x01, 0x01, 0x02, 0x03, 0x04, 0x05};
struct stmmac_packet_attrs attr = { };
int ret;
@@ -496,6 +496,9 @@ static int stmmac_test_hfilt(struct stmmac_priv *priv)
if (ret)
return ret;
+ if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
+ return -EOPNOTSUPP;
+
ret = dev_mc_add(priv->dev, gd_addr);
if (ret)
return ret;
@@ -573,6 +576,8 @@ static int stmmac_test_mcfilt(struct stmmac_priv *priv)
if (stmmac_filter_check(priv))
return -EOPNOTSUPP;
+ if (!priv->hw->multicast_filter_bins)
+ return -EOPNOTSUPP;
/* Remove all MC addresses */
__dev_mc_unsync(priv->dev, NULL);
@@ -611,6 +616,8 @@ static int stmmac_test_ucfilt(struct stmmac_priv *priv)
if (stmmac_filter_check(priv))
return -EOPNOTSUPP;
+ if (!priv->hw->multicast_filter_bins)
+ return -EOPNOTSUPP;
/* Remove all UC addresses */
__dev_uc_unsync(priv->dev, NULL);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index e231098061b6..f9a9a9d82233 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -510,7 +510,7 @@ static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv,
return NULL;
}
-struct {
+static struct {
int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
struct stmmac_flow_entry *entry);
} tc_flow_parsers[] = {
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index a65edd2770e6..37ba708ac781 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -722,7 +722,7 @@ static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
* cpdma_chan_split_pool - Splits ctrl pool between all channels.
* Has to be called under ctlr lock
*/
-int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
+static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
int free_rx_num = 0, free_tx_num = 0;
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index f61d094746c0..1a251f76d09b 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -241,8 +241,8 @@ static struct pernet_operations nsim_fib_net_ops = {
void nsim_fib_exit(void)
{
- unregister_pernet_subsys(&nsim_fib_net_ops);
unregister_fib_notifier(&nsim_fib_nb);
+ unregister_pernet_subsys(&nsim_fib_net_ops);
}
int nsim_fib_init(void)
@@ -258,6 +258,7 @@ int nsim_fib_init(void)
err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
if (err < 0) {
pr_err("Failed to register fib notifier\n");
+ unregister_pernet_subsys(&nsim_fib_net_ops);
goto err_out;
}
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 8fc33867e524..af8eabe7a6d4 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -572,6 +572,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.name = _name, \
/* PHY_BASIC_FEATURES */ \
.flags = PHY_IS_INTERNAL, \
+ .soft_reset = genphy_soft_reset, \
.config_init = bcm7xxx_config_init, \
.suspend = bcm7xxx_suspend, \
.resume = bcm7xxx_config_init, \
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 2fea5541c35a..63dedec0433d 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -341,6 +341,35 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
return genphy_config_aneg(phydev);
}
+static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
+ const u32 ksz_phy_id)
+{
+ int ret;
+
+ if ((phydev->phy_id & MICREL_PHY_ID_MASK) != ksz_phy_id)
+ return 0;
+
+ ret = phy_read(phydev, MII_BMSR);
+ if (ret < 0)
+ return ret;
+
+ /* The KSZ8051 PHY and the KSZ8794/KSZ8795/KSZ8765 switch share
+ * exactly the same PHY ID. However, they can be told apart by the
+ * presence of the extended capability registers: the KSZ8051 PHY
+ * has them while the switch does not.
+ */
+ ret &= BMSR_ERCAP;
+ if (ksz_phy_id == PHY_ID_KSZ8051)
+ return ret;
+ else
+ return !ret;
+}
+
+static int ksz8051_match_phy_device(struct phy_device *phydev)
+{
+ return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ8051);
+}
+
static int ksz8081_config_init(struct phy_device *phydev)
{
/* KSZPHY_OMSO_FACTORY_TEST is set at de-assertion of the reset line
@@ -364,6 +393,11 @@ static int ksz8061_config_init(struct phy_device *phydev)
return kszphy_config_init(phydev);
}
+static int ksz8795_match_phy_device(struct phy_device *phydev)
+{
+ return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ87XX);
+}
+
static int ksz9021_load_values_from_of(struct phy_device *phydev,
const struct device_node *of_node,
u16 reg,
@@ -1017,8 +1051,6 @@ static struct phy_driver ksphy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
- .phy_id = PHY_ID_KSZ8051,
- .phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8051",
/* PHY_BASIC_FEATURES */
.driver_data = &ksz8051_type,
@@ -1029,6 +1061,7 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
+ .match_phy_device = ksz8051_match_phy_device,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
@@ -1141,13 +1174,12 @@ static struct phy_driver ksphy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
- .phy_id = PHY_ID_KSZ8795,
- .phy_id_mask = MICREL_PHY_ID_MASK,
- .name = "Micrel KSZ8795",
+ .name = "Micrel KSZ87XX Switch",
/* PHY_BASIC_FEATURES */
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
+ .match_phy_device = ksz8795_match_phy_device,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
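The micrel hunks above drop the ambiguous phy_id table match and install .match_phy_device callbacks that probe BMSR to separate the KSZ8051 PHY from the KSZ87xx switch reusing its ID. A stripped-down sketch of such a callback (the FOO_* ID values are hypothetical):

#include <linux/mii.h>
#include <linux/phy.h>

#define FOO_PHY_ID	0x00221550
#define FOO_PHY_ID_MASK	0x00fffff0

static int foo_match_phy_device(struct phy_device *phydev)
{
	int bmsr;

	if ((phydev->phy_id & FOO_PHY_ID_MASK) != FOO_PHY_ID)
		return 0;

	bmsr = phy_read(phydev, MII_BMSR);
	if (bmsr < 0)
		return bmsr;

	/* Only the PHY variant advertises the extended capability
	 * registers; the switch sharing the ID does not. */
	return !!(bmsr & BMSR_ERCAP);
}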
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 7935593debb1..a1caeee12236 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -323,6 +323,8 @@ int genphy_c45_read_pma(struct phy_device *phydev)
{
int val;
+ linkmode_zero(phydev->lp_advertising);
+
val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
if (val < 0)
return val;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 119e6f466056..105d389b58e7 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -572,9 +572,6 @@ int phy_start_aneg(struct phy_device *phydev)
if (AUTONEG_DISABLE == phydev->autoneg)
phy_sanitize_settings(phydev);
- /* Invalidate LP advertising flags */
- linkmode_zero(phydev->lp_advertising);
-
err = phy_config_aneg(phydev);
if (err < 0)
goto out_unlock;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 9d2bbb13293e..adb66a2fae18 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1787,7 +1787,14 @@ int genphy_read_lpa(struct phy_device *phydev)
{
int lpa, lpagb;
- if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ if (!phydev->autoneg_complete) {
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising,
+ 0);
+ mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
+ return 0;
+ }
+
if (phydev->is_gigabit_capable) {
lpagb = phy_read(phydev, MII_STAT1000);
if (lpagb < 0)
@@ -1815,6 +1822,8 @@ int genphy_read_lpa(struct phy_device *phydev)
return lpa;
mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
+ } else {
+ linkmode_zero(phydev->lp_advertising);
}
return 0;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index a5a57ca94c1a..20e2ebe458f2 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -576,7 +576,7 @@ static int phylink_register_sfp(struct phylink *pl,
/**
* phylink_create() - create a phylink instance
- * @ndev: a pointer to the &struct net_device
+ * @config: a pointer to the target &struct phylink_config
* @fwnode: a pointer to a &struct fwnode_handle describing the network
* interface
* @iface: the desired link mode defined by &typedef phy_interface_t
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 812dc3a65efb..a8d3141582a5 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -526,8 +526,8 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
e = tun_flow_find(head, rxhash);
if (likely(e)) {
/* TODO: keep queueing to old queue until it's empty? */
- if (e->queue_index != queue_index)
- e->queue_index = queue_index;
+ if (READ_ONCE(e->queue_index) != queue_index)
+ WRITE_ONCE(e->queue_index, queue_index);
if (e->updated != jiffies)
e->updated = jiffies;
sock_rps_record_flow_hash(e->rps_rxhash);
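The tun hunk above annotates a lockless writer with READ_ONCE()/WRITE_ONCE(), preventing the compiler from tearing, refetching, or speculating the access while documenting the intentional data race with concurrent readers. A minimal sketch, assuming a hypothetical flow-entry type:

#include <linux/compiler.h>
#include <linux/types.h>

struct foo_flow_entry {
	u16 queue_index;	/* written here, read locklessly elsewhere */
};

static void foo_flow_update(struct foo_flow_entry *e, u16 queue_index)
{
	/* Marked accesses keep the load and store single and untorn
	 * even though no lock is held against readers. */
	if (READ_ONCE(e->queue_index) != queue_index)
		WRITE_ONCE(e->queue_index, queue_index);
}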
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index a505b2ab88b8..74849da031fa 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -186,7 +186,7 @@ struct hso_tiocmget {
int intr_completed;
struct usb_endpoint_descriptor *endp;
struct urb *urb;
- struct hso_serial_state_notification serial_state_notification;
+ struct hso_serial_state_notification *serial_state_notification;
u16 prev_UART_state_bitmap;
struct uart_icount icount;
};
@@ -1432,7 +1432,7 @@ static int tiocmget_submit_urb(struct hso_serial *serial,
usb_rcvintpipe(usb,
tiocmget->endp->
bEndpointAddress & 0x7F),
- &tiocmget->serial_state_notification,
+ tiocmget->serial_state_notification,
sizeof(struct hso_serial_state_notification),
tiocmget_intr_callback, serial,
tiocmget->endp->bInterval);
@@ -1479,7 +1479,7 @@ static void tiocmget_intr_callback(struct urb *urb)
/* wIndex should be the USB interface number of the port to which the
* notification applies, which should always be the Modem port.
*/
- serial_state_notification = &tiocmget->serial_state_notification;
+ serial_state_notification = tiocmget->serial_state_notification;
if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE ||
serial_state_notification->bNotification != B_NOTIFICATION ||
le16_to_cpu(serial_state_notification->wValue) != W_VALUE ||
@@ -2565,6 +2565,8 @@ static void hso_free_tiomget(struct hso_serial *serial)
usb_free_urb(tiocmget->urb);
tiocmget->urb = NULL;
serial->tiocmget = NULL;
+ kfree(tiocmget->serial_state_notification);
+ tiocmget->serial_state_notification = NULL;
kfree(tiocmget);
}
}
@@ -2615,10 +2617,13 @@ static struct hso_device *hso_create_bulk_serial_device(
num_urbs = 2;
serial->tiocmget = kzalloc(sizeof(struct hso_tiocmget),
GFP_KERNEL);
+ serial->tiocmget->serial_state_notification
+ = kzalloc(sizeof(struct hso_serial_state_notification),
+ GFP_KERNEL);
/* It isn't going to break our heart if the serial->tiocmget
* allocation fails; probing simply continues without it.
*/
- if (serial->tiocmget) {
+ if (serial->tiocmget && serial->tiocmget->serial_state_notification) {
tiocmget = serial->tiocmget;
tiocmget->endp = hso_get_ep(interface,
USB_ENDPOINT_XFER_INT,
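The hso hunks above move the serial-state notification out of struct hso_tiocmget and into its own kzalloc() buffer, because URB transfer buffers are DMA-mapped and must be standalone heap allocations rather than fields embedded in an unrelated structure. A sketch of the rule (the foo_* types are hypothetical):

#include <linux/slab.h>
#include <linux/types.h>

struct foo_notification {
	u8 data[8];
};

struct foo_state {
	/* A pointer, not an embedded struct: the buffer handed to the
	 * URB must be its own DMA-safe heap allocation. */
	struct foo_notification *notif;
};

static int foo_alloc(struct foo_state *st)
{
	st->notif = kzalloc(sizeof(*st->notif), GFP_KERNEL);
	return st->notif ? 0 : -ENOMEM;
}

static void foo_free(struct foo_state *st)
{
	kfree(st->notif);
	st->notif = NULL;
}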
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 58f5a219fb65..62948098191f 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3782,10 +3782,14 @@ static int lan78xx_probe(struct usb_interface *intf,
/* driver requires remote-wakeup capability during autosuspend. */
intf->needs_remote_wakeup = 1;
+ ret = lan78xx_phy_init(dev);
+ if (ret < 0)
+ goto out4;
+
ret = register_netdev(netdev);
if (ret != 0) {
netif_err(dev, probe, netdev, "couldn't register the device\n");
- goto out4;
+ goto out5;
}
usb_set_intfdata(intf, dev);
@@ -3798,14 +3802,10 @@ static int lan78xx_probe(struct usb_interface *intf,
pm_runtime_set_autosuspend_delay(&udev->dev,
DEFAULT_AUTOSUSPEND_DELAY);
- ret = lan78xx_phy_init(dev);
- if (ret < 0)
- goto out5;
-
return 0;
out5:
- unregister_netdev(netdev);
+ phy_disconnect(netdev->phydev);
out4:
usb_free_urb(dev->urb_intr);
out3:
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3d77cd402ba9..596428ec71df 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1327,6 +1327,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index c5d4a0060124..681e0def6356 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -335,7 +335,7 @@ static void sr_set_multicast(struct net_device *net)
static int sr_mdio_read(struct net_device *net, int phy_id, int loc)
{
struct usbnet *dev = netdev_priv(net);
- __le16 res;
+ __le16 res = 0;
mutex_lock(&dev->phy_mutex);
sr_set_sw_mii(dev);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index dc45d16e8d21..383d4fa555a8 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -2118,12 +2118,15 @@ static int ath10k_init_uart(struct ath10k *ar)
return ret;
}
- if (!uart_print && ar->hw_params.uart_pin_workaround) {
- ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin,
- ar->hw_params.uart_pin);
- if (ret) {
- ath10k_warn(ar, "failed to set UART TX pin: %d", ret);
- return ret;
+ if (!uart_print) {
+ if (ar->hw_params.uart_pin_workaround) {
+ ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin,
+ ar->hw_params.uart_pin);
+ if (ret) {
+ ath10k_warn(ar, "failed to set UART TX pin: %d",
+ ret);
+ return ret;
+ }
}
return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 7573af2d88ce..c2db758b9d54 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -162,12 +162,13 @@ int iwl_acpi_get_mcc(struct device *dev, char *mcc)
wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE,
&tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
- if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ tbl_rev != 0) {
ret = -EINVAL;
goto out_free;
}
@@ -224,12 +225,13 @@ int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE,
&tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
- if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ tbl_rev != 0) {
ret = -EINVAL;
goto out_free;
}
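The two acpi.c hunks above fix a subtle error-path bug: the old combined test could apply PTR_ERR() to a perfectly valid pointer when only tbl_rev was wrong, returning garbage instead of an errno. Splitting the checks maps each failure to the right code. A sketch of the corrected shape (foo_check_pkg() is hypothetical):

#include <linux/err.h>
#include <linux/errno.h>

static int foo_check_pkg(void *pkg, int rev)
{
	if (IS_ERR(pkg))
		return PTR_ERR(pkg);	/* a real encoded errno */

	if (rev != 0)
		return -EINVAL;		/* valid pointer, wrong revision */

	return 0;
}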
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 5c8602de9168..87421807e040 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -646,6 +646,7 @@ static struct scatterlist *alloc_sgtable(int size)
if (new_page)
__free_page(new_page);
}
+ kfree(table);
return NULL;
}
alloc_size = min_t(int, size, PAGE_SIZE);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
index f8e4f0f5de0c..f09e368c7040 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
@@ -112,38 +112,38 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf);
*/
static inline u32 iwl_umac_prph(struct iwl_trans *trans, u32 ofs)
{
- return ofs + trans->cfg->trans.umac_prph_offset;
+ return ofs + trans->trans_cfg->umac_prph_offset;
}
static inline u32 iwl_read_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs)
{
return iwl_read_prph_no_grab(trans, ofs +
- trans->cfg->trans.umac_prph_offset);
+ trans->trans_cfg->umac_prph_offset);
}
static inline u32 iwl_read_umac_prph(struct iwl_trans *trans, u32 ofs)
{
- return iwl_read_prph(trans, ofs + trans->cfg->trans.umac_prph_offset);
+ return iwl_read_prph(trans, ofs + trans->trans_cfg->umac_prph_offset);
}
static inline void iwl_write_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs,
u32 val)
{
- iwl_write_prph_no_grab(trans, ofs + trans->cfg->trans.umac_prph_offset,
+ iwl_write_prph_no_grab(trans, ofs + trans->trans_cfg->umac_prph_offset,
val);
}
static inline void iwl_write_umac_prph(struct iwl_trans *trans, u32 ofs,
u32 val)
{
- iwl_write_prph(trans, ofs + trans->cfg->trans.umac_prph_offset, val);
+ iwl_write_prph(trans, ofs + trans->trans_cfg->umac_prph_offset, val);
}
static inline int iwl_poll_umac_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout)
{
return iwl_poll_prph_bit(trans, addr +
- trans->cfg->trans.umac_prph_offset,
+ trans->trans_cfg->umac_prph_offset,
bits, mask, timeout);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 32a5e4e5461f..d9eb2b286438 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -420,6 +420,9 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
};
int ret;
+ if (mvm->trans->cfg->tx_with_siso_diversity)
+ init_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY));
+
lockdep_assert_held(&mvm->mutex);
mvm->rfkill_safe_init_done = false;
@@ -694,12 +697,13 @@ static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
- if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ tbl_rev != 0) {
ret = -EINVAL;
goto out_free;
}
@@ -731,13 +735,14 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
- (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) {
+ (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) ||
+ tbl_rev != 0) {
ret = -EINVAL;
goto out_free;
}
@@ -791,11 +796,16 @@ static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev > 1) {
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
+ if (tbl_rev != 0) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
mvm->geo_rev = tbl_rev;
for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
@@ -889,15 +899,17 @@ static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
* firmware versions. Unfortunately, we don't have a TLV API
* flag to rely on, so rely on the major version which is in
* the first byte of ucode_ver. This was implemented
- * initially on version 38 and then backported to29 and 17.
- * The intention was to have it in 36 as well, but not all
- * 8000 family got this feature enabled. The 8000 family is
- * the only one using version 36, so skip this version
- * entirely.
+ * initially on version 38 and then backported to 17. It was
+ * also backported to 29, but only for 7265D devices. The
+ * intention was to have it in 36 as well, but not all 8000
+ * family got this feature enabled. The 8000 family is the
+ * only one using version 36, so skip this version entirely.
*/
return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
- IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
- IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
+ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17 ||
+ (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 &&
+ ((mvm->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ CSR_HW_REV_TYPE_7265D));
}
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
@@ -1020,11 +1032,16 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
ACPI_PPAG_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
+ if (tbl_rev != 0) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
enabled = &wifi_pkg->package.elements[1];
if (enabled->type != ACPI_TYPE_INTEGER ||
(enabled->integer.value != 0 && enabled->integer.value != 1)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index cd1b10042fbf..d31f96c3f925 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -4881,11 +4881,11 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
if (!iwl_mvm_has_new_rx_api(mvm))
return;
- notif->cookie = mvm->queue_sync_cookie;
-
- if (notif->sync)
+ if (notif->sync) {
+ notif->cookie = mvm->queue_sync_cookie;
atomic_set(&mvm->queue_sync_counter,
mvm->trans->num_rx_queues);
+ }
ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif,
size, !notif->sync);
@@ -4905,7 +4905,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
out:
atomic_set(&mvm->queue_sync_counter, 0);
- mvm->queue_sync_cookie++;
+ if (notif->sync)
+ mvm->queue_sync_cookie++;
}
static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 75fa8a6aafee..74980382e64c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -107,13 +107,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
- if (ret) {
- dma_free_coherent(trans->dev,
- sizeof(*prph_scratch),
- prph_scratch,
- trans_pcie->prph_scratch_dma_addr);
- return ret;
- }
+ if (ret)
+ goto err_free_prph_scratch;
+
/* Allocate prph information
* currently we don't assign to the prph info anything, but it would get
@@ -121,16 +117,20 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
&trans_pcie->prph_info_dma_addr,
GFP_KERNEL);
- if (!prph_info)
- return -ENOMEM;
+ if (!prph_info) {
+ ret = -ENOMEM;
+ goto err_free_prph_scratch;
+ }
/* Allocate context info */
ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
sizeof(*ctxt_info_gen3),
&trans_pcie->ctxt_info_dma_addr,
GFP_KERNEL);
- if (!ctxt_info_gen3)
- return -ENOMEM;
+ if (!ctxt_info_gen3) {
+ ret = -ENOMEM;
+ goto err_free_prph_info;
+ }
ctxt_info_gen3->prph_info_base_addr =
cpu_to_le64(trans_pcie->prph_info_dma_addr);
@@ -186,6 +186,20 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
return 0;
+
+err_free_prph_info:
+ dma_free_coherent(trans->dev,
+ sizeof(*prph_info),
+ prph_info,
+ trans_pcie->prph_info_dma_addr);
+
+err_free_prph_scratch:
+ dma_free_coherent(trans->dev,
+ sizeof(*prph_scratch),
+ prph_scratch,
+ trans_pcie->prph_scratch_dma_addr);
+ return ret;
}
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
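The ctxt-info-gen3 hunks above replace duplicated inline cleanup with a single goto-based unwind chain, the idiomatic kernel shape for multi-step allocation: each failure jumps to a label that frees exactly what was set up before it. A condensed sketch under hypothetical names:

#include <linux/dma-mapping.h>

static int foo_alloc_pair(struct device *dev, size_t len,
			  void **a, dma_addr_t *a_dma,
			  void **b, dma_addr_t *b_dma)
{
	int ret;

	*a = dma_alloc_coherent(dev, len, a_dma, GFP_KERNEL);
	if (!*a)
		return -ENOMEM;

	*b = dma_alloc_coherent(dev, len, b_dma, GFP_KERNEL);
	if (!*b) {
		ret = -ENOMEM;
		goto err_free_a;	/* unwind only what succeeded */
	}

	return 0;

err_free_a:
	dma_free_coherent(dev, len, *a, *a_dma);
	return ret;
}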
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index e29c47744ef5..6f4bb7ce71a5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -513,31 +513,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
/* 9000 Series */
- {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
{IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
@@ -643,34 +645,34 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
+
+ {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
{IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_160_cfg_shared_clk)},
@@ -726,62 +728,60 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
+ {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
@@ -821,34 +821,34 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+
+ {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
{IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_160_cfg_soc)},
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index f8a1f985a1d8..6961f00ff812 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -3272,11 +3272,17 @@ static struct iwl_trans_dump_data
ptr = cmdq->write_ptr;
for (i = 0; i < cmdq->n_window; i++) {
u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
+ u8 tfdidx;
u32 caplen, cmdlen;
+ if (trans->trans_cfg->use_tfh)
+ tfdidx = idx;
+ else
+ tfdidx = ptr;
+
cmdlen = iwl_trans_pcie_get_cmdlen(trans,
- cmdq->tfds +
- tfd_size * ptr);
+ (u8 *)cmdq->tfds +
+ tfd_size * tfdidx);
caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
if (cmdlen) {
@@ -3450,6 +3456,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
spin_lock_init(&trans_pcie->reg_lock);
mutex_init(&trans_pcie->mutex);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
+
+ trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
+ WQ_HIGHPRI | WQ_UNBOUND, 1);
+ if (!trans_pcie->rba.alloc_wq) {
+ ret = -ENOMEM;
+ goto out_free_trans;
+ }
+ INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
+
trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
if (!trans_pcie->tso_hdr_page) {
ret = -ENOMEM;
@@ -3584,10 +3599,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->inta_mask = CSR_INI_SET_MASK;
}
- trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
- WQ_HIGHPRI | WQ_UNBOUND, 1);
- INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
mutex_init(&trans_pcie->fw_mon_data.mutex);
@@ -3599,6 +3610,8 @@ out_free_ict:
iwl_pcie_free_ict(trans);
out_no_pci:
free_percpu(trans_pcie->tso_hdr_page);
+ destroy_workqueue(trans_pcie->rba.alloc_wq);
+out_free_trans:
iwl_trans_free(trans);
return ERR_PTR(ret);
}
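
The hunks above move the rb_allocator workqueue allocation to the start of iwl_trans_pcie_alloc() and add the previously missing NULL check, wiring destroy_workqueue() into the unwind path. A minimal sketch of the same allocate-early, unwind-in-reverse pattern, with purely hypothetical names:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_ctx {
	struct workqueue_struct *wq;
	void *buf;
};

static struct demo_ctx *demo_alloc(void)
{
	struct demo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;

	/* Allocate the workqueue first, so every later failure can
	 * unwind past it with destroy_workqueue(). */
	ctx->wq = alloc_workqueue("demo_wq", WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!ctx->wq)
		goto out_free_ctx;

	ctx->buf = kzalloc(4096, GFP_KERNEL);
	if (!ctx->buf)
		goto out_destroy_wq;

	return ctx;

out_destroy_wq:
	destroy_workqueue(ctx->wq);
out_free_ctx:
	kfree(ctx);
	return NULL;
}
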
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 45c73a6f09a1..14f562cd715c 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -4026,7 +4026,7 @@ static int __init init_mac80211_hwsim(void)
err = dev_alloc_name(hwsim_mon, hwsim_mon->name);
if (err < 0) {
rtnl_unlock();
- goto out_free_radios;
+ goto out_free_mon;
}
err = register_netdevice(hwsim_mon);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 2b216edd0c7d..a90a518b40d3 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -23,7 +23,6 @@
#include <linux/leds.h>
#include <linux/mutex.h>
#include <linux/etherdevice.h>
-#include <linux/input-polldev.h>
#include <linux/kfifo.h>
#include <linux/hrtimer.h>
#include <linux/average.h>
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
index 4d4e3888ef20..f2395309ec00 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
@@ -555,7 +555,7 @@ static ssize_t rt2x00debug_write_restart_hw(struct file *file,
{
struct rt2x00debug_intf *intf = file->private_data;
struct rt2x00_dev *rt2x00dev = intf->rt2x00dev;
- static unsigned long last_reset;
+ static unsigned long last_reset = INITIAL_JIFFIES;
if (!rt2x00_has_cap_restart_hw(rt2x00dev))
return -EOPNOTSUPP;
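
Seeding last_reset with INITIAL_JIFFIES matters because jiffies itself boots a few minutes before the wrap point precisely to flush out wrap bugs; a zero-initialized static would make a time_before()-style rate limit spuriously reject the first restart after boot. A small sketch of the idiom, names hypothetical:

#include <linux/jiffies.h>

/* Allow at most one hardware restart per 15 seconds. Starting from
 * INITIAL_JIFFIES (not 0) keeps the very first call after boot from
 * being rejected, since jiffies begins near the wrap point. */
static bool demo_restart_allowed(void)
{
	static unsigned long last_reset = INITIAL_JIFFIES;

	if (time_before(jiffies, last_reset + 15 * HZ))
		return false;

	last_reset = jiffies;
	return true;
}
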
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 240f762b3749..103ed00775eb 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -719,7 +719,6 @@ err_unmap:
xenvif_unmap_frontend_data_rings(queue);
netif_napi_del(&queue->napi);
err:
- module_put(THIS_MODULE);
return err;
}
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index c5289eaf17ee..e897e4d768ef 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -547,18 +547,25 @@ static int pn533_usb_probe(struct usb_interface *interface,
rc = pn533_finalize_setup(priv);
if (rc)
- goto error;
+ goto err_deregister;
usb_set_intfdata(interface, phy);
return 0;
+err_deregister:
+ pn533_unregister_device(phy->priv);
error:
+ usb_kill_urb(phy->in_urb);
+ usb_kill_urb(phy->out_urb);
+ usb_kill_urb(phy->ack_urb);
+
usb_free_urb(phy->in_urb);
usb_free_urb(phy->out_urb);
usb_free_urb(phy->ack_urb);
usb_put_dev(phy->udev);
kfree(in_buf);
+ kfree(phy->ack_buffer);
return rc;
}
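
The reworked error path above kills each URB before freeing it, frees the previously leaked ack_buffer, and unregisters the device when pn533_finalize_setup() fails. The kill-then-free ordering in isolation, as a hedged sketch:

#include <linux/usb.h>

/* Tear-down order for a possibly in-flight URB: stop it first, then
 * drop the reference. Freeing a still-submitted URB risks a
 * use-after-free from its completion handler. */
static void demo_urb_teardown(struct urb *urb)
{
	usb_kill_urb(urb);	/* blocks until the URB has completed */
	usb_free_urb(urb);
}
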
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index fd7dea36c3b6..fa7ba09dca77 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -116,10 +116,26 @@ static void nvme_queue_scan(struct nvme_ctrl *ctrl)
/*
* Only queue new scan work when admin and IO queues are both alive
*/
- if (ctrl->state == NVME_CTRL_LIVE)
+ if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
queue_work(nvme_wq, &ctrl->scan_work);
}
+/*
+ * Use this function to proceed with scheduling reset_work for a controller
+ * that had previously been set to the resetting state. This is intended for
+ * code paths that can't be interrupted by other reset attempts. A hot removal
+ * may prevent this from succeeding.
+ */
+int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->state != NVME_CTRL_RESETTING)
+ return -EBUSY;
+ if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
+ return -EBUSY;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
+
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
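
nvme_try_sched_reset() deliberately does not change the controller state: the caller must already have won the transition to NVME_CTRL_RESETTING, unlike nvme_reset_ctrl() just above, which performs the transition itself. An illustrative caller, assuming the driver-internal declarations from drivers/nvme/host/nvme.h; the function name is hypothetical:

/* Win the RESETTING transition first, then hand the actual work off
 * to the reset workqueue. */
static int demo_recover(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;	/* another context owns recovery */

	/* Fails only if reset_work is already queued or the controller
	 * has been hot-removed in the meantime. */
	return nvme_try_sched_reset(ctrl);
}
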
@@ -137,8 +153,7 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
ret = nvme_reset_ctrl(ctrl);
if (!ret) {
flush_work(&ctrl->reset_work);
- if (ctrl->state != NVME_CTRL_LIVE &&
- ctrl->state != NVME_CTRL_ADMIN_ONLY)
+ if (ctrl->state != NVME_CTRL_LIVE)
ret = -ENETRESET;
}
@@ -315,15 +330,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
old_state = ctrl->state;
switch (new_state) {
- case NVME_CTRL_ADMIN_ONLY:
- switch (old_state) {
- case NVME_CTRL_CONNECTING:
- changed = true;
- /* FALLTHRU */
- default:
- break;
- }
- break;
case NVME_CTRL_LIVE:
switch (old_state) {
case NVME_CTRL_NEW:
@@ -339,7 +345,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (old_state) {
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
- case NVME_CTRL_ADMIN_ONLY:
changed = true;
/* FALLTHRU */
default:
@@ -359,7 +364,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
case NVME_CTRL_DELETING:
switch (old_state) {
case NVME_CTRL_LIVE:
- case NVME_CTRL_ADMIN_ONLY:
case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
changed = true;
@@ -381,8 +385,10 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
break;
}
- if (changed)
+ if (changed) {
ctrl->state = new_state;
+ wake_up_all(&ctrl->state_wq);
+ }
spin_unlock_irqrestore(&ctrl->lock, flags);
if (changed && ctrl->state == NVME_CTRL_LIVE)
@@ -391,6 +397,39 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
+/*
+ * Returns true for sink states that can't ever transition back to live.
+ */
+static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
+{
+ switch (ctrl->state) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_LIVE:
+ case NVME_CTRL_RESETTING:
+ case NVME_CTRL_CONNECTING:
+ return false;
+ case NVME_CTRL_DELETING:
+ case NVME_CTRL_DEAD:
+ return true;
+ default:
+ WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
+ return true;
+ }
+}
+
+/*
+ * Waits for the controller state to be resetting, or returns false if it is
+ * not possible to ever transition to that state.
+ */
+bool nvme_wait_reset(struct nvme_ctrl *ctrl)
+{
+ wait_event(ctrl->state_wq,
+ nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
+ nvme_state_terminal(ctrl));
+ return ctrl->state == NVME_CTRL_RESETTING;
+}
+EXPORT_SYMBOL_GPL(nvme_wait_reset);
+
static void nvme_free_ns_head(struct kref *ref)
{
struct nvme_ns_head *head =
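
nvme_wait_reset() works together with the wake_up_all() added to nvme_change_ctrl_state(): every state change wakes the waiters on ctrl->state_wq so their predicates are re-checked. The same wait-until-predicate shape in miniature, with hypothetical names and locking elided:

#include <linux/wait.h>

struct demo_dev {
	wait_queue_head_t state_wq;	/* init_waitqueue_head() at setup */
	int state;			/* writes serialized by a lock; elided here */
};

/* Sleep until the device reaches @want, or give up once it enters
 * @dead, a state it can never leave. */
static bool demo_wait_for_state(struct demo_dev *dev, int want, int dead)
{
	wait_event(dev->state_wq,
		   dev->state == want || dev->state == dead);
	return dev->state == want;
}

/* Every transition must wake all waiters so the predicate above is
 * re-evaluated. */
static void demo_set_state(struct demo_dev *dev, int new_state)
{
	dev->state = new_state;
	wake_up_all(&dev->state_wq);
}
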
@@ -1306,8 +1345,6 @@ static void nvme_update_formats(struct nvme_ctrl *ctrl)
if (ns->disk && nvme_revalidate_disk(ns->disk))
nvme_set_queue_dying(ns);
up_read(&ctrl->namespaces_rwsem);
-
- nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
}
static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -1323,6 +1360,7 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
nvme_unfreeze(ctrl);
nvme_mpath_unfreeze(ctrl->subsys);
mutex_unlock(&ctrl->subsys->lock);
+ nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
mutex_unlock(&ctrl->scan_lock);
}
if (effects & NVME_CMD_EFFECTS_CCC)
@@ -2874,7 +2912,6 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
switch (ctrl->state) {
case NVME_CTRL_LIVE:
- case NVME_CTRL_ADMIN_ONLY:
break;
default:
return -EWOULDBLOCK;
@@ -3168,7 +3205,6 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
static const char *const state_name[] = {
[NVME_CTRL_NEW] = "new",
[NVME_CTRL_LIVE] = "live",
- [NVME_CTRL_ADMIN_ONLY] = "only-admin",
[NVME_CTRL_RESETTING] = "resetting",
[NVME_CTRL_CONNECTING] = "connecting",
[NVME_CTRL_DELETING] = "deleting",
@@ -3679,11 +3715,10 @@ static void nvme_scan_work(struct work_struct *work)
struct nvme_id_ctrl *id;
unsigned nn;
- if (ctrl->state != NVME_CTRL_LIVE)
+ /* No tagset on a live ctrl means IO queues could not be created */
+ if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
return;
- WARN_ON_ONCE(!ctrl->tagset);
-
if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
dev_info(ctrl->device, "rescanning namespaces.\n");
nvme_clear_changed_ns_log(ctrl);
@@ -3844,13 +3879,13 @@ static void nvme_fw_act_work(struct work_struct *work)
if (time_after(jiffies, fw_act_timeout)) {
dev_warn(ctrl->device,
"Fw activation timeout, reset controller\n");
- nvme_reset_ctrl(ctrl);
- break;
+ nvme_try_sched_reset(ctrl);
+ return;
}
msleep(100);
}
- if (ctrl->state != NVME_CTRL_LIVE)
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
return;
nvme_start_queues(ctrl);
@@ -3870,7 +3905,13 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
nvme_queue_scan(ctrl);
break;
case NVME_AER_NOTICE_FW_ACT_STARTING:
- queue_work(nvme_wq, &ctrl->fw_act_work);
+ /*
+ * We are (ab)using the RESETTING state to prevent subsequent
+ * recovery actions from interfering with the controller's
+ * firmware activation.
+ */
+ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+ queue_work(nvme_wq, &ctrl->fw_act_work);
break;
#ifdef CONFIG_NVME_MULTIPATH
case NVME_AER_NOTICE_ANA:
@@ -3993,6 +4034,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
+ init_waitqueue_head(&ctrl->state_wq);
INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 93f08d77c896..a0ec40ab62ee 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -182,8 +182,7 @@ bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live)
{
- if (likely(ctrl->state == NVME_CTRL_LIVE ||
- ctrl->state == NVME_CTRL_ADMIN_ONLY))
+ if (likely(ctrl->state == NVME_CTRL_LIVE))
return true;
return __nvmf_check_ready(ctrl, rq, queue_live);
}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 38a83ef5bcd3..22e8401352c2 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -15,6 +15,7 @@
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
+#include <linux/wait.h>
#include <trace/events/block.h>
@@ -161,7 +162,6 @@ static inline u16 nvme_req_qid(struct request *req)
enum nvme_ctrl_state {
NVME_CTRL_NEW,
NVME_CTRL_LIVE,
- NVME_CTRL_ADMIN_ONLY, /* Only admin queue live */
NVME_CTRL_RESETTING,
NVME_CTRL_CONNECTING,
NVME_CTRL_DELETING,
@@ -199,6 +199,7 @@ struct nvme_ctrl {
struct cdev cdev;
struct work_struct reset_work;
struct work_struct delete_work;
+ wait_queue_head_t state_wq;
struct nvme_subsystem *subsys;
struct list_head subsys_entry;
@@ -449,6 +450,7 @@ void nvme_complete_rq(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state);
+bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
@@ -499,6 +501,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
+int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index bb88681f4dc3..869f462e6b6e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -773,7 +773,8 @@ static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
struct bio_vec *bv)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- unsigned int first_prp_len = dev->ctrl.page_size - bv->bv_offset;
+ unsigned int offset = bv->bv_offset & (dev->ctrl.page_size - 1);
+ unsigned int first_prp_len = dev->ctrl.page_size - offset;
iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
if (dma_mapping_error(dev->dev, iod->first_dma))
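
The fix masks bv_offset down to the offset within one controller page, since a bio_vec segment may start beyond the first page of its buffer; because the page size is a power of two, the low bits give the in-page offset directly. For example:

/* With a power-of-two page size, the low bits of an offset are the
 * offset within the page: e.g. 0x1234 & (0x1000 - 1) == 0x234. */
static inline unsigned int demo_offset_in_page(unsigned int off,
					       unsigned int page_size)
{
	return off & (page_size - 1);
}
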
@@ -2263,10 +2264,7 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
return true;
}
-/*
- * return error value only when tagset allocation failed
- */
-static int nvme_dev_add(struct nvme_dev *dev)
+static void nvme_dev_add(struct nvme_dev *dev)
{
int ret;
@@ -2296,7 +2294,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
if (ret) {
dev_warn(dev->ctrl.device,
"IO queues tagset allocation failed %d\n", ret);
- return ret;
+ return;
}
dev->ctrl.tagset = &dev->tagset;
} else {
@@ -2307,7 +2305,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
}
nvme_dbbuf_set(dev);
- return 0;
}
static int nvme_pci_enable(struct nvme_dev *dev)
@@ -2467,6 +2464,14 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
mutex_unlock(&dev->shutdown_lock);
}
+static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
+{
+ if (!nvme_wait_reset(&dev->ctrl))
+ return -EBUSY;
+ nvme_dev_disable(dev, shutdown);
+ return 0;
+}
+
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
@@ -2490,14 +2495,20 @@ static void nvme_release_prp_pools(struct nvme_dev *dev)
dma_pool_destroy(dev->prp_small_pool);
}
+static void nvme_free_tagset(struct nvme_dev *dev)
+{
+ if (dev->tagset.tags)
+ blk_mq_free_tag_set(&dev->tagset);
+ dev->ctrl.tagset = NULL;
+}
+
static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
{
struct nvme_dev *dev = to_nvme_dev(ctrl);
nvme_dbbuf_dma_free(dev);
put_device(dev->dev);
- if (dev->tagset.tags)
- blk_mq_free_tag_set(&dev->tagset);
+ nvme_free_tagset(dev);
if (dev->ctrl.admin_q)
blk_put_queue(dev->ctrl.admin_q);
kfree(dev->queues);
@@ -2508,6 +2519,11 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
{
+ /*
+ * Set state to deleting now to avoid blocking nvme_wait_reset(), which
+ * may be holding this pci_dev's device lock.
+ */
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
nvme_get_ctrl(&dev->ctrl);
nvme_dev_disable(dev, false);
nvme_kill_queues(&dev->ctrl);
@@ -2521,7 +2537,6 @@ static void nvme_reset_work(struct work_struct *work)
container_of(work, struct nvme_dev, ctrl.reset_work);
bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
int result;
- enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;
if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
result = -ENODEV;
@@ -2615,13 +2630,11 @@ static void nvme_reset_work(struct work_struct *work)
dev_warn(dev->ctrl.device, "IO queues not created\n");
nvme_kill_queues(&dev->ctrl);
nvme_remove_namespaces(&dev->ctrl);
- new_state = NVME_CTRL_ADMIN_ONLY;
+ nvme_free_tagset(dev);
} else {
nvme_start_queues(&dev->ctrl);
nvme_wait_freeze(&dev->ctrl);
- /* hit this only when allocate tagset fails */
- if (nvme_dev_add(dev))
- new_state = NVME_CTRL_ADMIN_ONLY;
+ nvme_dev_add(dev);
nvme_unfreeze(&dev->ctrl);
}
@@ -2629,9 +2642,9 @@ static void nvme_reset_work(struct work_struct *work)
* If only the admin queue is live, keep it for further investigation or
* recovery.
*/
- if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
dev_warn(dev->ctrl.device,
- "failed to mark controller state %d\n", new_state);
+ "failed to mark controller live state\n");
result = -ENODEV;
goto out;
}
@@ -2672,7 +2685,7 @@ static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
- *val = readq(to_nvme_dev(ctrl)->bar + off);
+ *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off);
return 0;
}
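
lo_hi_readq() is the portable helper from include/linux/io-64-nonatomic-lo-hi.h; on targets without a native 64-bit readq() it splits the access into two 32-bit reads, low word first. Roughly what it expands to (a sketch, not the exact kernel source):

#include <linux/io.h>
#include <linux/types.h>

static inline u64 demo_lo_hi_readq(const void __iomem *addr)
{
	const u32 __iomem *p = addr;
	u32 low = readl(p);		/* low 32 bits first */
	u32 high = readl(p + 1);	/* then the high 32 bits */

	return low + ((u64)high << 32);
}
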
@@ -2836,19 +2849,28 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static void nvme_reset_prepare(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_dev_disable(dev, false);
+
+ /*
+ * We don't need to check the return value from waiting for the reset
+ * state as pci_dev device lock is held, making it impossible to race
+ * with ->remove().
+ */
+ nvme_disable_prepare_reset(dev, false);
+ nvme_sync_queues(&dev->ctrl);
}
static void nvme_reset_done(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_reset_ctrl_sync(&dev->ctrl);
+
+ if (!nvme_try_sched_reset(&dev->ctrl))
+ flush_work(&dev->ctrl.reset_work);
}
static void nvme_shutdown(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_dev_disable(dev, true);
+ nvme_disable_prepare_reset(dev, true);
}
/*
@@ -2901,7 +2923,7 @@ static int nvme_resume(struct device *dev)
if (ndev->last_ps == U32_MAX ||
nvme_set_power_state(ctrl, ndev->last_ps) != 0)
- nvme_reset_ctrl(ctrl);
+ return nvme_try_sched_reset(&ndev->ctrl);
return 0;
}
@@ -2929,17 +2951,14 @@ static int nvme_suspend(struct device *dev)
*/
if (pm_suspend_via_firmware() || !ctrl->npss ||
!pcie_aspm_enabled(pdev) ||
- (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) {
- nvme_dev_disable(ndev, true);
- return 0;
- }
+ (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
+ return nvme_disable_prepare_reset(ndev, true);
nvme_start_freeze(ctrl);
nvme_wait_freeze(ctrl);
nvme_sync_queues(ctrl);
- if (ctrl->state != NVME_CTRL_LIVE &&
- ctrl->state != NVME_CTRL_ADMIN_ONLY)
+ if (ctrl->state != NVME_CTRL_LIVE)
goto unfreeze;
ret = nvme_get_power_state(ctrl, &ndev->last_ps);
@@ -2965,9 +2984,8 @@ static int nvme_suspend(struct device *dev)
* Clearing npss forces a controller reset on resume. The
* correct value will be rediscovered then.
*/
- nvme_dev_disable(ndev, true);
+ ret = nvme_disable_prepare_reset(ndev, true);
ctrl->npss = 0;
- ret = 0;
}
unfreeze:
nvme_unfreeze(ctrl);
@@ -2977,9 +2995,7 @@ unfreeze:
static int nvme_simple_suspend(struct device *dev)
{
struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
-
- nvme_dev_disable(ndev, true);
- return 0;
+ return nvme_disable_prepare_reset(ndev, true);
}
static int nvme_simple_resume(struct device *dev)
@@ -2987,8 +3003,7 @@ static int nvme_simple_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct nvme_dev *ndev = pci_get_drvdata(pdev);
- nvme_reset_ctrl(&ndev->ctrl);
- return 0;
+ return nvme_try_sched_reset(&ndev->ctrl);
}
static const struct dev_pm_ops nvme_dev_pm_ops = {
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 4d280160dd3f..f19a28b4e997 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1701,6 +1701,14 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
rq->tag, nvme_rdma_queue_idx(queue));
+ /*
+ * Restart the timer if a controller reset is already scheduled. Any
+ * timed out commands would be handled before entering the connecting
+ * state.
+ */
+ if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
+ return BLK_EH_RESET_TIMER;
+
if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
/*
* Teardown immediately if controller times out while starting
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 385a5212c10f..770dbcbc999e 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1386,7 +1386,9 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
queue->sock->sk->sk_state_change = nvme_tcp_state_change;
queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+#ifdef CONFIG_NET_RX_BUSY_POLL
queue->sock->sk->sk_ll_usec = 1;
+#endif
write_unlock_bh(&queue->sock->sk->sk_callback_lock);
return 0;
@@ -2044,6 +2046,14 @@ nvme_tcp_timeout(struct request *rq, bool reserved)
struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+ /*
+ * Restart the timer if a controller reset is already scheduled. Any
+ * timed out commands would be handled before entering the connecting
+ * state.
+ */
+ if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
+ return BLK_EH_RESET_TIMER;
+
dev_warn(ctrl->ctrl.device,
"queue %d: timeout request %#x type %d\n",
nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
@@ -2126,6 +2136,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
ret = nvme_tcp_map_data(queue, rq);
if (unlikely(ret)) {
+ nvme_cleanup_cmd(rq);
dev_err(queue->ctrl->ctrl.device,
"Failed to map data (%d)\n", ret);
return ret;
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 748a39fca771..11f5aea97d1b 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -157,8 +157,10 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
iod->sg_table.sgl = iod->first_sgl;
if (sg_alloc_table_chained(&iod->sg_table,
blk_rq_nr_phys_segments(req),
- iod->sg_table.sgl, SG_CHUNK_SIZE))
+ iod->sg_table.sgl, SG_CHUNK_SIZE)) {
+ nvme_cleanup_cmd(req);
return BLK_STS_RESOURCE;
+ }
iod->req.sg = iod->sg_table.sgl;
iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index ed50502cc65a..de8e4e347249 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -678,14 +678,6 @@ static int sba_dma_supported( struct device *dev, u64 mask)
return(0);
}
- /* Documentation/DMA-API-HOWTO.txt tells drivers to try 64-bit
- * first, then fall back to 32-bit if that fails.
- * We are just "encouraging" 32-bit DMA masks here since we can
- * never allow IOMMU bypass unless we add special support for ZX1.
- */
- if (mask > ~0U)
- return 0;
-
ioc = GET_IOC(dev);
if (!ioc)
return 0;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e7982af9a5d8..a97e2571a527 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -959,19 +959,6 @@ void pci_refresh_power_state(struct pci_dev *dev)
}
/**
- * pci_power_up - Put the given device into D0 forcibly
- * @dev: PCI device to power up
- */
-void pci_power_up(struct pci_dev *dev)
-{
- if (platform_pci_power_manageable(dev))
- platform_pci_set_power_state(dev, PCI_D0);
-
- pci_raw_set_power_state(dev, PCI_D0);
- pci_update_current_state(dev, PCI_D0);
-}
-
-/**
* pci_platform_power_transition - Use platform to change device power state
* @dev: PCI device to handle.
* @state: State to put the device into.
@@ -1154,6 +1141,17 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
EXPORT_SYMBOL(pci_set_power_state);
/**
+ * pci_power_up - Put the given device into D0 forcibly
+ * @dev: PCI device to power up
+ */
+void pci_power_up(struct pci_dev *dev)
+{
+ __pci_start_power_transition(dev, PCI_D0);
+ pci_raw_set_power_state(dev, PCI_D0);
+ pci_update_current_state(dev, PCI_D0);
+}
+
+/**
* pci_choose_state - Choose the power state of a PCI device
* @dev: PCI device to be suspended
* @state: target sleep state for the whole system. This is the value
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 86cc2cc68fb5..af063f690846 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -420,12 +420,6 @@ failed_sensitivity:
static int cmpc_accel_remove_v4(struct acpi_device *acpi)
{
- struct input_dev *inputdev;
- struct cmpc_accel *accel;
-
- inputdev = dev_get_drvdata(&acpi->dev);
- accel = dev_get_drvdata(&inputdev->dev);
-
device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr_v4);
device_remove_file(&acpi->dev, &cmpc_accel_g_select_attr_v4);
return cmpc_remove_acpi_notify_device(acpi);
@@ -656,12 +650,6 @@ failed_file:
static int cmpc_accel_remove(struct acpi_device *acpi)
{
- struct input_dev *inputdev;
- struct cmpc_accel *accel;
-
- inputdev = dev_get_drvdata(&acpi->dev);
- accel = dev_get_drvdata(&inputdev->dev);
-
device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr);
return cmpc_remove_acpi_notify_device(acpi);
}
diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c
index ea68f6ed66ae..ffb8d5d1eb5f 100644
--- a/drivers/platform/x86/i2c-multi-instantiate.c
+++ b/drivers/platform/x86/i2c-multi-instantiate.c
@@ -108,6 +108,7 @@ static int i2c_multi_inst_probe(struct platform_device *pdev)
if (ret < 0) {
dev_dbg(dev, "Error requesting irq at index %d: %d\n",
inst_data[i].irq_idx, ret);
+ goto error;
}
board_info.irq = ret;
break;
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index ab7ae1950867..fa97834fdb78 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -293,9 +293,8 @@ static int intel_punit_ipc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, punit_ipcdev);
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq < 0) {
- punit_ipcdev->irq = 0;
dev_warn(&pdev->dev, "Invalid IRQ, using polling mode\n");
} else {
ret = devm_request_irq(&pdev->dev, irq, intel_punit_ioc,
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 960961fb0d7c..0517272a268e 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -97,8 +97,8 @@ config PTP_1588_CLOCK_PCH
help
This driver adds support for using the PCH EG20T as a PTP
clock. The hardware supports time stamping of PTP packets
- when using the end-to-end delay (E2E) mechansim. The peer
- delay mechansim (P2P) is not supported.
+ when using the end-to-end delay (E2E) mechanism. The peer
+ delay mechanism (P2P) is not supported.
This clock is only useful if your PTP programs are getting
hardware time stamps on the PTP Ethernet packets using the
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index ba7d2480613b..dcdaba689b20 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -113,6 +113,7 @@ struct subchannel {
enum sch_todo todo;
struct work_struct todo_work;
struct schib_config config;
+ u64 dma_mask;
char *driver_override; /* Driver name to force a match */
} __attribute__ ((aligned(8)));
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 1fbfb0a93f5f..831850435c23 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -232,7 +232,12 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
* belong to a subchannel need to fit 31 bit width (e.g. ccw).
*/
sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
- sch->dev.dma_mask = &sch->dev.coherent_dma_mask;
+ /*
+ * But we don't have such restrictions imposed on the stuff that
+ * is handled by the streaming API.
+ */
+ sch->dma_mask = DMA_BIT_MASK(64);
+ sch->dev.dma_mask = &sch->dma_mask;
return sch;
err:
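
The point of the new sch->dma_mask field: dev->coherent_dma_mask constrains dma_alloc_coherent(), while *dev->dma_mask constrains the streaming dma_map_*() calls, so the two may legitimately differ. A hedged sketch of setting them independently, with the backing storage supplied by the caller as in the subchannel case:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Coherent allocations must stay below 2 GiB; streaming mappings may
 * use the full 64-bit space. @storage must live as long as @dev. */
static void demo_set_dma_masks(struct device *dev, u64 *storage)
{
	dev->coherent_dma_mask = DMA_BIT_MASK(31);	/* dma_alloc_coherent() */
	*storage = DMA_BIT_MASK(64);			/* dma_map_single() etc. */
	dev->dma_mask = storage;
}
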
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 131430bd48d9..0c6245fc7706 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -710,7 +710,7 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
if (!cdev->private)
goto err_priv;
cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
- cdev->dev.dma_mask = &cdev->dev.coherent_dma_mask;
+ cdev->dev.dma_mask = sch->dev.dma_mask;
dma_pool = cio_gp_dma_create(&cdev->dev, 1);
if (!dma_pool)
goto err_dma_pool;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index b8799cd3e7aa..bd8143e51747 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -2021,10 +2021,10 @@ static bool qeth_l2_vnicc_recover_char(struct qeth_card *card, u32 vnicc,
static void qeth_l2_vnicc_init(struct qeth_card *card)
{
u32 *timeout = &card->options.vnicc.learning_timeout;
+ bool enable, error = false;
unsigned int chars_len, i;
unsigned long chars_tmp;
u32 sup_cmds, vnicc;
- bool enable, error;
QETH_CARD_TEXT(card, 2, "vniccini");
/* reset rx_bcast */
@@ -2045,17 +2045,24 @@ static void qeth_l2_vnicc_init(struct qeth_card *card)
chars_len = sizeof(card->options.vnicc.sup_chars) * BITS_PER_BYTE;
for_each_set_bit(i, &chars_tmp, chars_len) {
vnicc = BIT(i);
- qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds);
- if (!(sup_cmds & IPA_VNICC_SET_TIMEOUT) ||
- !(sup_cmds & IPA_VNICC_GET_TIMEOUT))
+ if (qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds)) {
+ sup_cmds = 0;
+ error = true;
+ }
+ if ((sup_cmds & IPA_VNICC_SET_TIMEOUT) &&
+ (sup_cmds & IPA_VNICC_GET_TIMEOUT))
+ card->options.vnicc.getset_timeout_sup |= vnicc;
+ else
card->options.vnicc.getset_timeout_sup &= ~vnicc;
- if (!(sup_cmds & IPA_VNICC_ENABLE) ||
- !(sup_cmds & IPA_VNICC_DISABLE))
+ if ((sup_cmds & IPA_VNICC_ENABLE) &&
+ (sup_cmds & IPA_VNICC_DISABLE))
+ card->options.vnicc.set_char_sup |= vnicc;
+ else
card->options.vnicc.set_char_sup &= ~vnicc;
}
/* enforce assumed default values and recover settings, if changed */
- error = qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
- timeout);
+ error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
+ timeout);
chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT;
chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE;
chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 296bbc3c4606..cf63916814cc 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -27,6 +27,11 @@
struct kmem_cache *zfcp_fsf_qtcb_cache;
+static bool ber_stop = true;
+module_param(ber_stop, bool, 0600);
+MODULE_PARM_DESC(ber_stop,
+ "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)");
+
static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
{
struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
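
The new ber_stop parameter follows the standard pattern for a root-writable boolean policy knob, exposed at /sys/module/zfcp/parameters/ber_stop with mode 0600. The pattern in general form, with hypothetical names:

#include <linux/module.h>

/* Root-writable at /sys/module/<module>/parameters/demo_stop. */
static bool demo_stop = true;
module_param(demo_stop, bool, 0600);
MODULE_PARM_DESC(demo_stop, "Stop the device on excessive errors (default on)");
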
@@ -236,10 +241,15 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
case FSF_STATUS_READ_SENSE_DATA_AVAIL:
break;
case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
- dev_warn(&adapter->ccw_device->dev,
- "The error threshold for checksum statistics "
- "has been exceeded\n");
zfcp_dbf_hba_bit_err("fssrh_3", req);
+ if (ber_stop) {
+ dev_warn(&adapter->ccw_device->dev,
+ "All paths over this FCP device are disused because of excessive bit errors\n");
+ zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
+ } else {
+ dev_warn(&adapter->ccw_device->dev,
+ "The error threshold for checksum statistics has been exceeded\n");
+ }
break;
case FSF_STATUS_READ_LINK_DOWN:
zfcp_fsf_status_read_link_down(req);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 4c26630c1c3e..009fd5a33fcd 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2837,8 +2837,6 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
if (sense_len == 0) {
rsp->status_srb = NULL;
sp->done(sp, cp->result);
- } else {
- WARN_ON_ONCE(true);
}
}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1c470e31ae81..ae2fa170f6ad 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -967,6 +967,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
ses->data_direction = scmd->sc_data_direction;
ses->sdb = scmd->sdb;
ses->result = scmd->result;
+ ses->resid_len = scmd->req.resid_len;
ses->underflow = scmd->underflow;
ses->prot_op = scmd->prot_op;
ses->eh_eflags = scmd->eh_eflags;
@@ -977,6 +978,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
memset(scmd->cmnd, 0, BLK_MAX_CDB);
memset(&scmd->sdb, 0, sizeof(scmd->sdb));
scmd->result = 0;
+ scmd->req.resid_len = 0;
if (sense_bytes) {
scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
@@ -1029,6 +1031,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
scmd->sc_data_direction = ses->data_direction;
scmd->sdb = ses->sdb;
scmd->result = ses->result;
+ scmd->req.resid_len = ses->resid_len;
scmd->underflow = ses->underflow;
scmd->prot_op = ses->prot_op;
scmd->eh_eflags = ses->eh_eflags;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index dc210b9d4896..5447738906ac 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1834,6 +1834,7 @@ static const struct blk_mq_ops scsi_mq_ops_no_commit = {
.init_request = scsi_mq_init_request,
.exit_request = scsi_mq_exit_request,
.initialize_rq_fn = scsi_initialize_rq,
+ .cleanup_rq = scsi_cleanup_rq,
.busy = scsi_mq_lld_busy,
.map_queues = scsi_map_queues,
};
@@ -1921,7 +1922,8 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q)
{
struct scsi_device *sdev = NULL;
- if (q->mq_ops == &scsi_mq_ops)
+ if (q->mq_ops == &scsi_mq_ops_no_commit ||
+ q->mq_ops == &scsi_mq_ops)
sdev = q->queuedata;
if (!sdev || !get_device(&sdev->sdev_gendev))
sdev = NULL;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 50928bc266eb..03163ac5fe95 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1654,7 +1654,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
/* we need to evaluate the error return */
if (scsi_sense_valid(sshdr) &&
(sshdr->asc == 0x3a || /* medium not present */
- sshdr->asc == 0x20)) /* invalid command */
+ sshdr->asc == 0x20 || /* invalid command */
+ (sshdr->asc == 0x74 && sshdr->ascq == 0x71))) /* drive is password locked */
/* this is no error here */
return 0;
diff --git a/drivers/staging/exfat/Kconfig b/drivers/staging/exfat/Kconfig
index 290dbfc7ace1..ce32dfe33bec 100644
--- a/drivers/staging/exfat/Kconfig
+++ b/drivers/staging/exfat/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config EXFAT_FS
tristate "exFAT fs support"
depends on BLOCK
@@ -6,7 +7,7 @@ config EXFAT_FS
This adds support for the exFAT file system.
config EXFAT_DONT_MOUNT_VFAT
- bool "Prohibit mounting of fat/vfat filesysems by exFAT"
+ bool "Prohibit mounting of fat/vfat filesystems by exFAT"
depends on EXFAT_FS
default y
help
diff --git a/drivers/staging/exfat/Makefile b/drivers/staging/exfat/Makefile
index 84944dfbae28..6c90aec83feb 100644
--- a/drivers/staging/exfat/Makefile
+++ b/drivers/staging/exfat/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-or-later
obj-$(CONFIG_EXFAT_FS) += exfat.o
diff --git a/drivers/staging/exfat/exfat.h b/drivers/staging/exfat/exfat.h
index 6c12f2d79f4d..3abab33e932c 100644
--- a/drivers/staging/exfat/exfat.h
+++ b/drivers/staging/exfat/exfat.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
*/
diff --git a/drivers/staging/exfat/exfat_blkdev.c b/drivers/staging/exfat/exfat_blkdev.c
index f086c75e7076..81d20e6241c6 100644
--- a/drivers/staging/exfat/exfat_blkdev.c
+++ b/drivers/staging/exfat/exfat_blkdev.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
*/
diff --git a/drivers/staging/exfat/exfat_cache.c b/drivers/staging/exfat/exfat_cache.c
index 1565ce65d39f..e1b001718709 100644
--- a/drivers/staging/exfat/exfat_cache.c
+++ b/drivers/staging/exfat/exfat_cache.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
*/
diff --git a/drivers/staging/exfat/exfat_core.c b/drivers/staging/exfat/exfat_core.c
index b3e9cf725cf5..79174e5c4145 100644
--- a/drivers/staging/exfat/exfat_core.c
+++ b/drivers/staging/exfat/exfat_core.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
*/
diff --git a/drivers/staging/exfat/exfat_nls.c b/drivers/staging/exfat/exfat_nls.c
index 03cb8290b5d2..a5c4b68925fb 100644
--- a/drivers/staging/exfat/exfat_nls.c
+++ b/drivers/staging/exfat/exfat_nls.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
*/
diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c
index 5f6caee819a6..3b2b0ceb7297 100644
--- a/drivers/staging/exfat/exfat_super.c
+++ b/drivers/staging/exfat/exfat_super.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
*/
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/time.h>
#include <linux/slab.h>
+#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
@@ -3450,7 +3451,7 @@ static void exfat_free_super(struct exfat_sb_info *sbi)
kfree(sbi->options.iocharset);
/* mutex_init is in exfat_fill_super function. only for 3.7+ */
mutex_destroy(&sbi->s_lock);
- kfree(sbi);
+ kvfree(sbi);
}
static void exfat_put_super(struct super_block *sb)
@@ -3845,7 +3846,7 @@ static int exfat_fill_super(struct super_block *sb, void *data, int silent)
* the filesystem, since we're only just about to mount
* it and have no inodes etc active!
*/
- sbi = kzalloc(sizeof(struct exfat_sb_info), GFP_KERNEL);
+ sbi = kvzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
mutex_init(&sbi->s_lock);
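
Switching to kvzalloc()/kvfree() lets this large superblock-info allocation fall back to vmalloc when physically contiguous memory is scarce; the two must always be paired, since plain kfree() cannot free vmalloc memory. A self-contained sketch with hypothetical names:

#include <linux/mm.h>
#include <linux/slab.h>

struct demo_info {
	char name[64];
	unsigned long bitmap[512];
};

static struct demo_info *demo_alloc(void)
{
	/* kvzalloc() tries kmalloc() first and falls back to vmalloc()
	 * when memory is fragmented or the allocation is large. */
	return kvzalloc(sizeof(struct demo_info), GFP_KERNEL);
}

static void demo_free(struct demo_info *info)
{
	kvfree(info);	/* correct for both kmalloc and vmalloc backing */
}
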
diff --git a/drivers/staging/exfat/exfat_upcase.c b/drivers/staging/exfat/exfat_upcase.c
index 366082fb3dab..b91a1faa0e50 100644
--- a/drivers/staging/exfat/exfat_upcase.c
+++ b/drivers/staging/exfat/exfat_upcase.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
*/
diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig
index 8ec524a95ec8..cb61c2a772bd 100644
--- a/drivers/staging/fbtft/Kconfig
+++ b/drivers/staging/fbtft/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
menuconfig FB_TFT
tristate "Support for small TFT LCD display modules"
- depends on FB && SPI
+ depends on FB && SPI && OF
depends on GPIOLIB || COMPILE_TEST
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
@@ -199,13 +199,3 @@ config FB_TFT_WATTEROTT
depends on FB_TFT
help
Generic Framebuffer support for WATTEROTT
-
-config FB_FLEX
- tristate "Generic FB driver for TFT LCD displays"
- depends on FB_TFT
- help
- Generic Framebuffer support for TFT LCD displays.
-
-config FB_TFT_FBTFT_DEVICE
- tristate "Module to for adding FBTFT devices"
- depends on FB_TFT
diff --git a/drivers/staging/fbtft/Makefile b/drivers/staging/fbtft/Makefile
index 6bc03311c9c7..27af43f32f81 100644
--- a/drivers/staging/fbtft/Makefile
+++ b/drivers/staging/fbtft/Makefile
@@ -36,7 +36,3 @@ obj-$(CONFIG_FB_TFT_UC1611) += fb_uc1611.o
obj-$(CONFIG_FB_TFT_UC1701) += fb_uc1701.o
obj-$(CONFIG_FB_TFT_UPD161704) += fb_upd161704.o
obj-$(CONFIG_FB_TFT_WATTEROTT) += fb_watterott.o
-obj-$(CONFIG_FB_FLEX) += flexfb.o
-
-# Device modules
-obj-$(CONFIG_FB_TFT_FBTFT_DEVICE) += fbtft_device.o
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index cf5700a2ea66..a0a67aa517f0 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -714,7 +714,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
if (par->gamma.curves && gamma) {
if (fbtft_gamma_parse_str(par, par->gamma.curves, gamma,
strlen(gamma)))
- goto alloc_fail;
+ goto release_framebuf;
}
/* Transmit buffer */
@@ -731,7 +731,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
if (txbuflen > 0) {
txbuf = devm_kzalloc(par->info->device, txbuflen, GFP_KERNEL);
if (!txbuf)
- goto alloc_fail;
+ goto release_framebuf;
par->txbuf.buf = txbuf;
par->txbuf.len = txbuflen;
}
@@ -753,6 +753,9 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
return info;
+release_framebuf:
+ framebuffer_release(info);
+
alloc_fail:
vfree(vmem);
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
deleted file mode 100644
index 44e1410eb3fe..000000000000
--- a/drivers/staging/fbtft/fbtft_device.c
+++ /dev/null
@@ -1,1261 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- *
- * Copyright (C) 2013, Noralf Tronnes
- */
-
-#define pr_fmt(fmt) "fbtft_device: " fmt
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/gpio/consumer.h>
-#include <linux/spi/spi.h>
-#include <video/mipi_display.h>
-
-#include "fbtft.h"
-
-#define MAX_GPIOS 32
-
-static struct spi_device *spi_device;
-static struct platform_device *p_device;
-
-static char *name;
-module_param(name, charp, 0000);
-MODULE_PARM_DESC(name,
- "Devicename (required). name=list => list all supported devices.");
-
-static unsigned int rotate;
-module_param(rotate, uint, 0000);
-MODULE_PARM_DESC(rotate,
- "Angle to rotate display counter clockwise: 0, 90, 180, 270");
-
-static unsigned int busnum;
-module_param(busnum, uint, 0000);
-MODULE_PARM_DESC(busnum, "SPI bus number (default=0)");
-
-static unsigned int cs;
-module_param(cs, uint, 0000);
-MODULE_PARM_DESC(cs, "SPI chip select (default=0)");
-
-static unsigned int speed;
-module_param(speed, uint, 0000);
-MODULE_PARM_DESC(speed, "SPI speed (override device default)");
-
-static int mode = -1;
-module_param(mode, int, 0000);
-MODULE_PARM_DESC(mode, "SPI mode (override device default)");
-
-static unsigned int fps;
-module_param(fps, uint, 0000);
-MODULE_PARM_DESC(fps, "Frames per second (override driver default)");
-
-static char *gamma;
-module_param(gamma, charp, 0000);
-MODULE_PARM_DESC(gamma,
- "String representation of Gamma Curve(s). Driver specific.");
-
-static int txbuflen;
-module_param(txbuflen, int, 0000);
-MODULE_PARM_DESC(txbuflen, "txbuflen (override driver default)");
-
-static int bgr = -1;
-module_param(bgr, int, 0000);
-MODULE_PARM_DESC(bgr,
- "BGR bit (supported by some drivers).");
-
-static unsigned int startbyte;
-module_param(startbyte, uint, 0000);
-MODULE_PARM_DESC(startbyte, "Sets the Start byte used by some SPI displays.");
-
-static bool custom;
-module_param(custom, bool, 0000);
-MODULE_PARM_DESC(custom, "Add a custom display device. Use speed= argument to make it a SPI device, else platform_device");
-
-static unsigned int width;
-module_param(width, uint, 0000);
-MODULE_PARM_DESC(width, "Display width, used with the custom argument");
-
-static unsigned int height;
-module_param(height, uint, 0000);
-MODULE_PARM_DESC(height, "Display height, used with the custom argument");
-
-static unsigned int buswidth = 8;
-module_param(buswidth, uint, 0000);
-MODULE_PARM_DESC(buswidth, "Display bus width, used with the custom argument");
-
-static s16 init[FBTFT_MAX_INIT_SEQUENCE];
-static int init_num;
-module_param_array(init, short, &init_num, 0000);
-MODULE_PARM_DESC(init, "Init sequence, used with the custom argument");
-
-static unsigned long debug;
-module_param(debug, ulong, 0000);
-MODULE_PARM_DESC(debug,
- "level: 0-7 (the remaining 29 bits is for advanced usage)");
-
-static unsigned int verbose = 3;
-module_param(verbose, uint, 0000);
-MODULE_PARM_DESC(verbose,
- "0 silent, >1 show devices, >2 show devices before (default=3)");
-
-struct fbtft_device_display {
- char *name;
- struct spi_board_info *spi;
- struct platform_device *pdev;
-};
-
-static void fbtft_device_pdev_release(struct device *dev);
-
-static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len);
-static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par,
- int xs, int ys, int xe, int ye);
-
-#define ADAFRUIT18_GAMMA \
- "02 1c 07 12 37 32 29 2d 29 25 2B 39 00 01 03 10\n" \
- "03 1d 07 06 2E 2C 29 2D 2E 2E 37 3F 00 00 02 10"
-
-#define CBERRY28_GAMMA \
- "D0 00 14 15 13 2C 42 43 4E 09 16 14 18 21\n" \
- "D0 00 14 15 13 0B 43 55 53 0C 17 14 23 20"
-
-static const s16 cberry28_init_sequence[] = {
- /* turn off sleep mode */
- -1, MIPI_DCS_EXIT_SLEEP_MODE,
- -2, 120,
-
- /* set pixel format to RGB-565 */
- -1, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT,
-
- -1, 0xB2, 0x0C, 0x0C, 0x00, 0x33, 0x33,
-
- /*
- * VGH = 13.26V
- * VGL = -10.43V
- */
- -1, 0xB7, 0x35,
-
- /*
- * VDV and VRH register values come from command write
- * (instead of NVM)
- */
- -1, 0xC2, 0x01, 0xFF,
-
- /*
- * VAP = 4.7V + (VCOM + VCOM offset + 0.5 * VDV)
- * VAN = -4.7V + (VCOM + VCOM offset + 0.5 * VDV)
- */
- -1, 0xC3, 0x17,
-
- /* VDV = 0V */
- -1, 0xC4, 0x20,
-
- /* VCOM = 0.675V */
- -1, 0xBB, 0x17,
-
- /* VCOM offset = 0V */
- -1, 0xC5, 0x20,
-
- /*
- * AVDD = 6.8V
- * AVCL = -4.8V
- * VDS = 2.3V
- */
- -1, 0xD0, 0xA4, 0xA1,
-
- -1, MIPI_DCS_SET_DISPLAY_ON,
-
- -3,
-};
-
-static const s16 hy28b_init_sequence[] = {
- -1, 0x00e7, 0x0010, -1, 0x0000, 0x0001,
- -1, 0x0001, 0x0100, -1, 0x0002, 0x0700,
- -1, 0x0003, 0x1030, -1, 0x0004, 0x0000,
- -1, 0x0008, 0x0207, -1, 0x0009, 0x0000,
- -1, 0x000a, 0x0000, -1, 0x000c, 0x0001,
- -1, 0x000d, 0x0000, -1, 0x000f, 0x0000,
- -1, 0x0010, 0x0000, -1, 0x0011, 0x0007,
- -1, 0x0012, 0x0000, -1, 0x0013, 0x0000,
- -2, 50, -1, 0x0010, 0x1590, -1, 0x0011,
- 0x0227, -2, 50, -1, 0x0012, 0x009c, -2, 50,
- -1, 0x0013, 0x1900, -1, 0x0029, 0x0023,
- -1, 0x002b, 0x000e, -2, 50,
- -1, 0x0020, 0x0000, -1, 0x0021, 0x0000,
- -2, 50, -1, 0x0050, 0x0000,
- -1, 0x0051, 0x00ef, -1, 0x0052, 0x0000,
- -1, 0x0053, 0x013f, -1, 0x0060, 0xa700,
- -1, 0x0061, 0x0001, -1, 0x006a, 0x0000,
- -1, 0x0080, 0x0000, -1, 0x0081, 0x0000,
- -1, 0x0082, 0x0000, -1, 0x0083, 0x0000,
- -1, 0x0084, 0x0000, -1, 0x0085, 0x0000,
- -1, 0x0090, 0x0010, -1, 0x0092, 0x0000,
- -1, 0x0093, 0x0003, -1, 0x0095, 0x0110,
- -1, 0x0097, 0x0000, -1, 0x0098, 0x0000,
- -1, 0x0007, 0x0133, -1, 0x0020, 0x0000,
- -1, 0x0021, 0x0000, -2, 100, -3 };
-
-#define HY28B_GAMMA \
- "04 1F 4 7 7 0 7 7 6 0\n" \
- "0F 00 1 7 4 0 0 0 6 7"
-
-static const s16 pitft_init_sequence[] = {
- -1, MIPI_DCS_SOFT_RESET,
- -2, 5,
- -1, MIPI_DCS_SET_DISPLAY_OFF,
- -1, 0xEF, 0x03, 0x80, 0x02,
- -1, 0xCF, 0x00, 0xC1, 0x30,
- -1, 0xED, 0x64, 0x03, 0x12, 0x81,
- -1, 0xE8, 0x85, 0x00, 0x78,
- -1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02,
- -1, 0xF7, 0x20,
- -1, 0xEA, 0x00, 0x00,
- -1, 0xC0, 0x23,
- -1, 0xC1, 0x10,
- -1, 0xC5, 0x3E, 0x28,
- -1, 0xC7, 0x86,
- -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55,
- -1, 0xB1, 0x00, 0x18,
- -1, 0xB6, 0x08, 0x82, 0x27,
- -1, 0xF2, 0x00,
- -1, MIPI_DCS_SET_GAMMA_CURVE, 0x01,
- -1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E,
- 0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00,
- -1, 0xE1, 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31,
- 0xC1, 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F,
- -1, MIPI_DCS_EXIT_SLEEP_MODE,
- -2, 100,
- -1, MIPI_DCS_SET_DISPLAY_ON,
- -2, 20,
- -3
-};
-
-static const s16 waveshare32b_init_sequence[] = {
- -1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02,
- -1, 0xCF, 0x00, 0xC1, 0x30,
- -1, 0xE8, 0x85, 0x00, 0x78,
- -1, 0xEA, 0x00, 0x00,
- -1, 0xED, 0x64, 0x03, 0x12, 0x81,
- -1, 0xF7, 0x20,
- -1, 0xC0, 0x23,
- -1, 0xC1, 0x10,
- -1, 0xC5, 0x3E, 0x28,
- -1, 0xC7, 0x86,
- -1, MIPI_DCS_SET_ADDRESS_MODE, 0x28,
- -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55,
- -1, 0xB1, 0x00, 0x18,
- -1, 0xB6, 0x08, 0x82, 0x27,
- -1, 0xF2, 0x00,
- -1, MIPI_DCS_SET_GAMMA_CURVE, 0x01,
- -1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E,
- 0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00,
- -1, 0xE1, 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31,
- 0xC1, 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F,
- -1, MIPI_DCS_EXIT_SLEEP_MODE,
- -2, 120,
- -1, MIPI_DCS_SET_DISPLAY_ON,
- -1, MIPI_DCS_WRITE_MEMORY_START,
- -3
-};
-
-#define PIOLED_GAMMA "0 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 " \
- "2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 " \
- "3 3 3 4 4 4 4 4 4 4 4 4 4 4 4"
-
-/* Supported displays in alphabetical order */
-static struct fbtft_device_display displays[] = {
- {
- .name = "adafruit18",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_st7735r",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .gamma = ADAFRUIT18_GAMMA,
- }
- }
- }, {
- .name = "adafruit18_green",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_st7735r",
- .max_speed_hz = 4000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- .fbtftops.set_addr_win =
- adafruit18_green_tab_set_addr_win,
- },
- .bgr = true,
- .gamma = ADAFRUIT18_GAMMA,
- }
- }
- }, {
- .name = "adafruit22",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_hx8340bn",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 9,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "adafruit22a",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9340",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "adafruit28",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9341",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "adafruit13m",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ssd1306",
- .max_speed_hz = 16000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- }
- }
- }, {
- .name = "admatec_c-berry28",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_st7789v",
- .max_speed_hz = 48000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- .init_sequence = cberry28_init_sequence,
- },
- .gamma = CBERRY28_GAMMA,
- }
- }
- }, {
- .name = "agm1264k-fl",
- .pdev = &(struct platform_device) {
- .name = "fb_agm1264k-fl",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = FBTFT_ONBOARD_BACKLIGHT,
- },
- },
- }
- }
- }, {
- .name = "dogs102",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_uc1701",
- .max_speed_hz = 8000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "er_tftm050_2",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ra8875",
- .max_speed_hz = 5000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- .width = 480,
- .height = 272,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "er_tftm070_5",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ra8875",
- .max_speed_hz = 5000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- .width = 800,
- .height = 480,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "ew24ha0",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_uc1611",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- }
- }
- }, {
- .name = "ew24ha0_9bit",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_uc1611",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 9,
- },
- }
- }
- }, {
- .name = "flexfb",
- .spi = &(struct spi_board_info) {
- .modalias = "flexfb",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- }
- }, {
- .name = "flexpfb",
- .pdev = &(struct platform_device) {
- .name = "flexpfb",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- }
- }
- }, {
- .name = "freetronicsoled128",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ssd1351",
- .max_speed_hz = 20000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = FBTFT_ONBOARD_BACKLIGHT,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "hx8353d",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_hx8353d",
- .max_speed_hz = 16000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- }
- }
- }, {
- .name = "hy28a",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9320",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .startbyte = 0x70,
- .bgr = true,
- }
- }
- }, {
- .name = "hy28b",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9325",
- .max_speed_hz = 48000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- .init_sequence = hy28b_init_sequence,
- },
- .startbyte = 0x70,
- .bgr = true,
- .fps = 50,
- .gamma = HY28B_GAMMA,
- }
- }
- }, {
- .name = "ili9481",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9481",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .regwidth = 16,
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "itdb24",
- .pdev = &(struct platform_device) {
- .name = "fb_s6d1121",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = false,
- },
- }
- }
- }, {
- .name = "itdb28",
- .pdev = &(struct platform_device) {
- .name = "fb_ili9325",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- },
- }
- }
- }, {
- .name = "itdb28_spi",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9325",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "mi0283qt-2",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_hx8347d",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .startbyte = 0x70,
- .bgr = true,
- }
- }
- }, {
- .name = "mi0283qt-9a",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9341",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 9,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "mi0283qt-v2",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_watterott",
- .max_speed_hz = 4000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- }
- }
- }, {
- .name = "nokia3310",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_pcd8544",
- .max_speed_hz = 400000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- }
- }
- }, {
- .name = "nokia3310a",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_tls8204",
- .max_speed_hz = 1000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- }
- }
- }, {
- .name = "nokia5110",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9163",
- .max_speed_hz = 12000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "piscreen",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9486",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .regwidth = 16,
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "pitft",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9340",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .chip_select = 0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- .init_sequence = pitft_init_sequence,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "pioled",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ssd1351",
- .max_speed_hz = 20000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- .bgr = true,
- .gamma = PIOLED_GAMMA
- }
- }
- }, {
- .name = "rpi-display",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9341",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "s6d02a1",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_s6d02a1",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "sainsmart18",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_st7735r",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- }
- }
- }, {
- .name = "sainsmart32",
- .pdev = &(struct platform_device) {
- .name = "fb_ssd1289",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 16,
- .txbuflen = -2, /* disable buffer */
- .backlight = 1,
- .fbtftops.write = write_gpio16_wr_slow,
- },
- .bgr = true,
- },
- },
- }
- }, {
- .name = "sainsmart32_fast",
- .pdev = &(struct platform_device) {
- .name = "fb_ssd1289",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 16,
- .txbuflen = -2, /* disable buffer */
- .backlight = 1,
- },
- .bgr = true,
- },
- },
- }
- }, {
- .name = "sainsmart32_latched",
- .pdev = &(struct platform_device) {
- .name = "fb_ssd1289",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 16,
- .txbuflen = -2, /* disable buffer */
- .backlight = 1,
- .fbtftops.write =
- fbtft_write_gpio16_wr_latched,
- },
- .bgr = true,
- },
- },
- }
- }, {
- .name = "sainsmart32_spi",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ssd1289",
- .max_speed_hz = 16000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "spidev",
- .spi = &(struct spi_board_info) {
- .modalias = "spidev",
- .max_speed_hz = 500000,
- .bus_num = 0,
- .chip_select = 0,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- }
- }
- }, {
- .name = "ssd1331",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ssd1331",
- .max_speed_hz = 20000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- }
- }
- }, {
- .name = "tinylcd35",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_tinylcd",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "tm022hdh26",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9341",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "tontec35_9481", /* boards before 02 July 2014 */
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9481",
- .max_speed_hz = 128000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "tontec35_9486", /* boards after 02 July 2014 */
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9486",
- .max_speed_hz = 128000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "upd161704",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_upd161704",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- }
- }
- }, {
- .name = "waveshare32b",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9340",
- .max_speed_hz = 48000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- .init_sequence =
- waveshare32b_init_sequence,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "waveshare22",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_bd663474",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- }
- }
- }, {
- /* This should be the last item.
- * Used with the custom argument
- */
- .name = "",
- .spi = &(struct spi_board_info) {
- .modalias = "",
- .max_speed_hz = 0,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- }
- },
- .pdev = &(struct platform_device) {
- .name = "",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- },
- },
- },
- }
-};
-
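
Each .spi / .pdev initializer above takes the address of an anonymous compound literal; at file scope these have static storage duration, which is what makes the pattern legal in a static initializer. The idiom in miniature (illustrative names only):

	static struct spi_board_info *example_board =
		&(struct spi_board_info) {
			.modalias = "fb_example",	/* hypothetical modalias */
			.max_speed_hz = 32000000,
			.mode = SPI_MODE_0,
		};
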
-static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len)
-{
- u16 data;
- int i;
-#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
- static u16 prev_data;
-#endif
-
- fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%zu): ", __func__, len);
-
- while (len) {
- data = *(u16 *)buf;
-
- /* Start writing by pulling down /WR */
- gpiod_set_value(par->gpio.wr, 0);
-
- /* Set data */
-#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
- if (data == prev_data) {
- gpiod_set_value(par->gpio.wr, 0); /* used as delay */
- } else {
- for (i = 0; i < 16; i++) {
- if ((data & 1) != (prev_data & 1))
- gpiod_set_value(par->gpio.db[i],
- data & 1);
- data >>= 1;
- prev_data >>= 1;
- }
- }
-#else
- for (i = 0; i < 16; i++) {
- gpiod_set_value(par->gpio.db[i], data & 1);
- data >>= 1;
- }
-#endif
-
- /* Pullup /WR */
- gpiod_set_value(par->gpio.wr, 1);
-
-#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
- prev_data = *(u16 *)buf;
-#endif
- buf += 2;
- len -= 2;
- }
-
- return 0;
-}
-
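
The prev_data bookkeeping above exists so that only data lines whose level actually changed get toggled. The same effect can be had by computing a changed-bit mask up front with an XOR; a sketch (not taken from the driver):

	u16 changed = data ^ prev_data;

	for (i = 0; i < 16; i++) {
		if (changed & 1)
			gpiod_set_value(par->gpio.db[i], data & 1);
		changed >>= 1;
		data >>= 1;
	}
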
-static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par,
- int xs, int ys, int xe, int ye)
-{
- write_reg(par, 0x2A, 0, xs + 2, 0, xe + 2);
- write_reg(par, 0x2B, 0, ys + 1, 0, ye + 1);
- write_reg(par, 0x2C);
-}
-
-static void fbtft_device_pdev_release(struct device *dev)
-{
-/* Needed to silence this message:
- * Device 'xxx' does not have a release() function,
- * it is broken and must be fixed
- */
-}
-
-static int spi_device_found(struct device *dev, void *data)
-{
- struct spi_device *spi = to_spi_device(dev);
-
- dev_info(dev, "%s %s %dkHz %d bits mode=0x%02X\n", spi->modalias,
- dev_name(dev), spi->max_speed_hz / 1000, spi->bits_per_word,
- spi->mode);
-
- return 0;
-}
-
-static void pr_spi_devices(void)
-{
- pr_debug("SPI devices registered:\n");
- bus_for_each_dev(&spi_bus_type, NULL, NULL, spi_device_found);
-}
-
-static int p_device_found(struct device *dev, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- if (strstr(pdev->name, "fb"))
- dev_info(dev, "%s id=%d pdata? %s\n", pdev->name, pdev->id,
- pdev->dev.platform_data ? "yes" : "no");
-
- return 0;
-}
-
-static void pr_p_devices(void)
-{
- pr_debug("'fb' Platform devices registered:\n");
- bus_for_each_dev(&platform_bus_type, NULL, NULL, p_device_found);
-}
-
-#ifdef MODULE
-static void fbtft_device_spi_delete(struct spi_master *master, unsigned int cs)
-{
- struct device *dev;
- char str[32];
-
- snprintf(str, sizeof(str), "%s.%u", dev_name(&master->dev), cs);
-
- dev = bus_find_device_by_name(&spi_bus_type, NULL, str);
- if (dev) {
- if (verbose)
- dev_info(dev, "Deleting %s\n", str);
- device_del(dev);
- }
-}
-
-static int fbtft_device_spi_device_register(struct spi_board_info *spi)
-{
- struct spi_master *master;
-
- master = spi_busnum_to_master(spi->bus_num);
- if (!master) {
- pr_err("spi_busnum_to_master(%d) returned NULL\n",
- spi->bus_num);
- return -EINVAL;
- }
- /* make sure it's available */
- fbtft_device_spi_delete(master, spi->chip_select);
- spi_device = spi_new_device(master, spi);
- put_device(&master->dev);
- if (!spi_device) {
- dev_err(&master->dev, "spi_new_device() returned NULL\n");
- return -EPERM;
- }
- return 0;
-}
-#else
-static int fbtft_device_spi_device_register(struct spi_board_info *spi)
-{
- return spi_register_board_info(spi, 1);
-}
-#endif
-
-static int __init fbtft_device_init(void)
-{
- struct spi_board_info *spi = NULL;
- struct fbtft_platform_data *pdata;
- bool found = false;
- int i = 0;
- int ret = 0;
-
- if (!name) {
-#ifdef MODULE
- pr_err("missing module parameter: 'name'\n");
- return -EINVAL;
-#else
- return 0;
-#endif
- }
-
- if (init_num > FBTFT_MAX_INIT_SEQUENCE) {
- pr_err("init parameter: exceeded max array size: %d\n",
- FBTFT_MAX_INIT_SEQUENCE);
- return -EINVAL;
- }
-
- if (verbose > 2) {
- pr_spi_devices(); /* print list of registered SPI devices */
- pr_p_devices(); /* print list of 'fb' platform devices */
- }
-
- pr_debug("name='%s', busnum=%d, cs=%d\n", name, busnum, cs);
-
- if (rotate > 0 && rotate < 4) {
- rotate = (4 - rotate) * 90;
- pr_warn("argument 'rotate' should be an angle. Values 1-3 is deprecated. Setting it to %d.\n",
- rotate);
- }
- if (rotate != 0 && rotate != 90 && rotate != 180 && rotate != 270) {
- pr_warn("argument 'rotate' illegal value: %d. Setting it to 0.\n",
- rotate);
- rotate = 0;
- }
-
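
For the record, the legacy mapping just above works out as follows (a restatement of the arithmetic shown, nothing more):

	rotate=1  ->  (4 - 1) * 90 = 270 degrees
	rotate=2  ->  (4 - 2) * 90 = 180 degrees
	rotate=3  ->  (4 - 3) * 90 = 90 degrees
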
- /* name=list lists all supported displays */
- if (strcmp(name, "list") == 0) {
- pr_info("Supported displays:\n");
-
- for (i = 0; i < ARRAY_SIZE(displays); i++)
- pr_info("%s\n", displays[i].name);
- return -ECANCELED;
- }
-
- if (custom) {
- i = ARRAY_SIZE(displays) - 1;
- displays[i].name = name;
- if (speed == 0) {
- displays[i].pdev->name = name;
- displays[i].spi = NULL;
- } else {
- size_t len;
-
- len = strlcpy(displays[i].spi->modalias, name,
- SPI_NAME_SIZE);
- if (len >= SPI_NAME_SIZE)
- pr_warn("modalias (name) truncated to: %s\n",
- displays[i].spi->modalias);
- displays[i].pdev = NULL;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(displays); i++) {
- if (strncmp(name, displays[i].name, SPI_NAME_SIZE) == 0) {
- if (displays[i].spi) {
- spi = displays[i].spi;
- spi->chip_select = cs;
- spi->bus_num = busnum;
- if (speed)
- spi->max_speed_hz = speed;
- if (mode != -1)
- spi->mode = mode;
- pdata = (void *)spi->platform_data;
- } else if (displays[i].pdev) {
- p_device = displays[i].pdev;
- pdata = p_device->dev.platform_data;
- } else {
- pr_err("broken displays array\n");
- return -EINVAL;
- }
-
- pdata->rotate = rotate;
- if (bgr == 0)
- pdata->bgr = false;
- else if (bgr == 1)
- pdata->bgr = true;
- if (startbyte)
- pdata->startbyte = startbyte;
- if (gamma)
- pdata->gamma = gamma;
- pdata->display.debug = debug;
- if (fps)
- pdata->fps = fps;
- if (txbuflen)
- pdata->txbuflen = txbuflen;
- if (init_num)
- pdata->display.init_sequence = init;
- if (custom) {
- pdata->display.width = width;
- pdata->display.height = height;
- pdata->display.buswidth = buswidth;
- pdata->display.backlight = 1;
- }
-
- if (displays[i].spi) {
- ret = fbtft_device_spi_device_register(spi);
- if (ret) {
- pr_err("failed to register SPI device\n");
- return ret;
- }
- } else {
- ret = platform_device_register(p_device);
- if (ret < 0) {
- pr_err("platform_device_register() returned %d\n",
- ret);
- return ret;
- }
- }
- found = true;
- break;
- }
- }
-
- if (!found) {
- pr_err("display not supported: '%s'\n", name);
- return -EINVAL;
- }
-
- if (spi_device && (verbose > 1))
- pr_spi_devices();
- if (p_device && (verbose > 1))
- pr_p_devices();
-
- return 0;
-}
-
-static void __exit fbtft_device_exit(void)
-{
- if (spi_device) {
- device_del(&spi_device->dev);
- kfree(spi_device);
- }
-
- if (p_device)
- platform_device_unregister(p_device);
-}
-
-arch_initcall(fbtft_device_init);
-module_exit(fbtft_device_exit);
-
-MODULE_DESCRIPTION("Add a FBTFT device.");
-MODULE_AUTHOR("Noralf Tronnes");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
deleted file mode 100644
index 3747321011fa..000000000000
--- a/drivers/staging/fbtft/flexfb.c
+++ /dev/null
@@ -1,851 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Generic FB driver for TFT LCD displays
- *
- * Copyright (C) 2013 Noralf Tronnes
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
-#include <linux/gpio/consumer.h>
-#include <linux/spi/spi.h>
-#include <linux/delay.h>
-
-#include "fbtft.h"
-
-#define DRVNAME "flexfb"
-
-static char *chip;
-module_param(chip, charp, 0000);
-MODULE_PARM_DESC(chip, "LCD controller");
-
-static unsigned int width;
-module_param(width, uint, 0000);
-MODULE_PARM_DESC(width, "Display width");
-
-static unsigned int height;
-module_param(height, uint, 0000);
-MODULE_PARM_DESC(height, "Display height");
-
-static s16 init[512];
-static int init_num;
-module_param_array(init, short, &init_num, 0000);
-MODULE_PARM_DESC(init, "Init sequence");
-
-static unsigned int setaddrwin;
-module_param(setaddrwin, uint, 0000);
-MODULE_PARM_DESC(setaddrwin, "Which set_addr_win() implementation to use");
-
-static unsigned int buswidth = 8;
-module_param(buswidth, uint, 0000);
-MODULE_PARM_DESC(buswidth, "Width of databus (default: 8)");
-
-static unsigned int regwidth = 8;
-module_param(regwidth, uint, 0000);
-MODULE_PARM_DESC(regwidth, "Width of controller register (default: 8)");
-
-static bool nobacklight;
-module_param(nobacklight, bool, 0000);
-MODULE_PARM_DESC(nobacklight, "Turn off backlight functionality.");
-
-static bool latched;
-module_param(latched, bool, 0000);
-MODULE_PARM_DESC(latched, "Use with latched 16-bit databus");
-
-static const s16 *initp;
-static int initp_num;
-
-/* default init sequences */
-static const s16 st7735r_init[] = {
- -1, 0x01,
- -2, 150,
- -1, 0x11,
- -2, 500,
- -1, 0xB1, 0x01, 0x2C, 0x2D,
- -1, 0xB2, 0x01, 0x2C, 0x2D,
- -1, 0xB3, 0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D,
- -1, 0xB4, 0x07,
- -1, 0xC0, 0xA2, 0x02, 0x84,
- -1, 0xC1, 0xC5,
- -1, 0xC2, 0x0A, 0x00,
- -1, 0xC3, 0x8A, 0x2A,
- -1, 0xC4, 0x8A, 0xEE,
- -1, 0xC5, 0x0E,
- -1, 0x20,
- -1, 0x36, 0xC0,
- -1, 0x3A, 0x05,
- -1, 0xE0, 0x0f, 0x1a, 0x0f, 0x18, 0x2f, 0x28, 0x20, 0x22,
- 0x1f, 0x1b, 0x23, 0x37, 0x00, 0x07, 0x02, 0x10,
- -1, 0xE1, 0x0f, 0x1b, 0x0f, 0x17, 0x33, 0x2c, 0x29, 0x2e,
- 0x30, 0x30, 0x39, 0x3f, 0x00, 0x07, 0x03, 0x10,
- -1, 0x29,
- -2, 100,
- -1, 0x13,
- -2, 10,
- -3
-};
-
-static const s16 ssd1289_init[] = {
- -1, 0x00, 0x0001,
- -1, 0x03, 0xA8A4,
- -1, 0x0C, 0x0000,
- -1, 0x0D, 0x080C,
- -1, 0x0E, 0x2B00,
- -1, 0x1E, 0x00B7,
- -1, 0x01, 0x2B3F,
- -1, 0x02, 0x0600,
- -1, 0x10, 0x0000,
- -1, 0x11, 0x6070,
- -1, 0x05, 0x0000,
- -1, 0x06, 0x0000,
- -1, 0x16, 0xEF1C,
- -1, 0x17, 0x0003,
- -1, 0x07, 0x0233,
- -1, 0x0B, 0x0000,
- -1, 0x0F, 0x0000,
- -1, 0x41, 0x0000,
- -1, 0x42, 0x0000,
- -1, 0x48, 0x0000,
- -1, 0x49, 0x013F,
- -1, 0x4A, 0x0000,
- -1, 0x4B, 0x0000,
- -1, 0x44, 0xEF00,
- -1, 0x45, 0x0000,
- -1, 0x46, 0x013F,
- -1, 0x30, 0x0707,
- -1, 0x31, 0x0204,
- -1, 0x32, 0x0204,
- -1, 0x33, 0x0502,
- -1, 0x34, 0x0507,
- -1, 0x35, 0x0204,
- -1, 0x36, 0x0204,
- -1, 0x37, 0x0502,
- -1, 0x3A, 0x0302,
- -1, 0x3B, 0x0302,
- -1, 0x23, 0x0000,
- -1, 0x24, 0x0000,
- -1, 0x25, 0x8000,
- -1, 0x4f, 0x0000,
- -1, 0x4e, 0x0000,
- -1, 0x22,
- -3
-};
-
-static const s16 hx8340bn_init[] = {
- -1, 0xC1, 0xFF, 0x83, 0x40,
- -1, 0x11,
- -2, 150,
- -1, 0xCA, 0x70, 0x00, 0xD9,
- -1, 0xB0, 0x01, 0x11,
- -1, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06,
- -2, 20,
- -1, 0xC2, 0x60, 0x71, 0x01, 0x0E, 0x05, 0x02, 0x09, 0x31, 0x0A,
- -1, 0xC3, 0x67, 0x30, 0x61, 0x17, 0x48, 0x07, 0x05, 0x33,
- -2, 10,
- -1, 0xB5, 0x35, 0x20, 0x45,
- -1, 0xB4, 0x33, 0x25, 0x4C,
- -2, 10,
- -1, 0x3A, 0x05,
- -1, 0x29,
- -2, 10,
- -3
-};
-
-static const s16 ili9225_init[] = {
- -1, 0x0001, 0x011C,
- -1, 0x0002, 0x0100,
- -1, 0x0003, 0x1030,
- -1, 0x0008, 0x0808,
- -1, 0x000C, 0x0000,
- -1, 0x000F, 0x0A01,
- -1, 0x0020, 0x0000,
- -1, 0x0021, 0x0000,
- -2, 50,
- -1, 0x0010, 0x0A00,
- -1, 0x0011, 0x1038,
- -2, 50,
- -1, 0x0012, 0x1121,
- -1, 0x0013, 0x004E,
- -1, 0x0014, 0x676F,
- -1, 0x0030, 0x0000,
- -1, 0x0031, 0x00DB,
- -1, 0x0032, 0x0000,
- -1, 0x0033, 0x0000,
- -1, 0x0034, 0x00DB,
- -1, 0x0035, 0x0000,
- -1, 0x0036, 0x00AF,
- -1, 0x0037, 0x0000,
- -1, 0x0038, 0x00DB,
- -1, 0x0039, 0x0000,
- -1, 0x0050, 0x0000,
- -1, 0x0051, 0x060A,
- -1, 0x0052, 0x0D0A,
- -1, 0x0053, 0x0303,
- -1, 0x0054, 0x0A0D,
- -1, 0x0055, 0x0A06,
- -1, 0x0056, 0x0000,
- -1, 0x0057, 0x0303,
- -1, 0x0058, 0x0000,
- -1, 0x0059, 0x0000,
- -2, 50,
- -1, 0x0007, 0x1017,
- -2, 50,
- -3
-};
-
-static const s16 ili9320_init[] = {
- -1, 0x00E5, 0x8000,
- -1, 0x0000, 0x0001,
- -1, 0x0001, 0x0100,
- -1, 0x0002, 0x0700,
- -1, 0x0003, 0x1030,
- -1, 0x0004, 0x0000,
- -1, 0x0008, 0x0202,
- -1, 0x0009, 0x0000,
- -1, 0x000A, 0x0000,
- -1, 0x000C, 0x0000,
- -1, 0x000D, 0x0000,
- -1, 0x000F, 0x0000,
- -1, 0x0010, 0x0000,
- -1, 0x0011, 0x0007,
- -1, 0x0012, 0x0000,
- -1, 0x0013, 0x0000,
- -2, 200,
- -1, 0x0010, 0x17B0,
- -1, 0x0011, 0x0031,
- -2, 50,
- -1, 0x0012, 0x0138,
- -2, 50,
- -1, 0x0013, 0x1800,
- -1, 0x0029, 0x0008,
- -2, 50,
- -1, 0x0020, 0x0000,
- -1, 0x0021, 0x0000,
- -1, 0x0030, 0x0000,
- -1, 0x0031, 0x0505,
- -1, 0x0032, 0x0004,
- -1, 0x0035, 0x0006,
- -1, 0x0036, 0x0707,
- -1, 0x0037, 0x0105,
- -1, 0x0038, 0x0002,
- -1, 0x0039, 0x0707,
- -1, 0x003C, 0x0704,
- -1, 0x003D, 0x0807,
- -1, 0x0050, 0x0000,
- -1, 0x0051, 0x00EF,
- -1, 0x0052, 0x0000,
- -1, 0x0053, 0x013F,
- -1, 0x0060, 0x2700,
- -1, 0x0061, 0x0001,
- -1, 0x006A, 0x0000,
- -1, 0x0080, 0x0000,
- -1, 0x0081, 0x0000,
- -1, 0x0082, 0x0000,
- -1, 0x0083, 0x0000,
- -1, 0x0084, 0x0000,
- -1, 0x0085, 0x0000,
- -1, 0x0090, 0x0010,
- -1, 0x0092, 0x0000,
- -1, 0x0093, 0x0003,
- -1, 0x0095, 0x0110,
- -1, 0x0097, 0x0000,
- -1, 0x0098, 0x0000,
- -1, 0x0007, 0x0173,
- -3
-};
-
-static const s16 ili9325_init[] = {
- -1, 0x00E3, 0x3008,
- -1, 0x00E7, 0x0012,
- -1, 0x00EF, 0x1231,
- -1, 0x0001, 0x0100,
- -1, 0x0002, 0x0700,
- -1, 0x0003, 0x1030,
- -1, 0x0004, 0x0000,
- -1, 0x0008, 0x0207,
- -1, 0x0009, 0x0000,
- -1, 0x000A, 0x0000,
- -1, 0x000C, 0x0000,
- -1, 0x000D, 0x0000,
- -1, 0x000F, 0x0000,
- -1, 0x0010, 0x0000,
- -1, 0x0011, 0x0007,
- -1, 0x0012, 0x0000,
- -1, 0x0013, 0x0000,
- -2, 200,
- -1, 0x0010, 0x1690,
- -1, 0x0011, 0x0223,
- -2, 50,
- -1, 0x0012, 0x000D,
- -2, 50,
- -1, 0x0013, 0x1200,
- -1, 0x0029, 0x000A,
- -1, 0x002B, 0x000C,
- -2, 50,
- -1, 0x0020, 0x0000,
- -1, 0x0021, 0x0000,
- -1, 0x0030, 0x0000,
- -1, 0x0031, 0x0506,
- -1, 0x0032, 0x0104,
- -1, 0x0035, 0x0207,
- -1, 0x0036, 0x000F,
- -1, 0x0037, 0x0306,
- -1, 0x0038, 0x0102,
- -1, 0x0039, 0x0707,
- -1, 0x003C, 0x0702,
- -1, 0x003D, 0x1604,
- -1, 0x0050, 0x0000,
- -1, 0x0051, 0x00EF,
- -1, 0x0052, 0x0000,
- -1, 0x0053, 0x013F,
- -1, 0x0060, 0xA700,
- -1, 0x0061, 0x0001,
- -1, 0x006A, 0x0000,
- -1, 0x0080, 0x0000,
- -1, 0x0081, 0x0000,
- -1, 0x0082, 0x0000,
- -1, 0x0083, 0x0000,
- -1, 0x0084, 0x0000,
- -1, 0x0085, 0x0000,
- -1, 0x0090, 0x0010,
- -1, 0x0092, 0x0600,
- -1, 0x0007, 0x0133,
- -3
-};
-
-static const s16 ili9341_init[] = {
- -1, 0x28,
- -2, 20,
- -1, 0xCF, 0x00, 0x83, 0x30,
- -1, 0xED, 0x64, 0x03, 0x12, 0x81,
- -1, 0xE8, 0x85, 0x01, 0x79,
- -1, 0xCB, 0x39, 0x2c, 0x00, 0x34, 0x02,
- -1, 0xF7, 0x20,
- -1, 0xEA, 0x00, 0x00,
- -1, 0xC0, 0x26,
- -1, 0xC1, 0x11,
- -1, 0xC5, 0x35, 0x3E,
- -1, 0xC7, 0xBE,
- -1, 0xB1, 0x00, 0x1B,
- -1, 0xB6, 0x0a, 0x82, 0x27, 0x00,
- -1, 0xB7, 0x07,
- -1, 0x3A, 0x55,
- -1, 0x36, 0x48,
- -1, 0x11,
- -2, 120,
- -1, 0x29,
- -2, 20,
- -3
-};
-
-static const s16 ssd1351_init[] = {
- -1, 0xfd, 0x12,
- -1, 0xfd, 0xb1,
- -1, 0xae,
- -1, 0xb3, 0xf1,
- -1, 0xca, 0x7f,
- -1, 0xa0, 0x74,
- -1, 0x15, 0x00, 0x7f,
- -1, 0x75, 0x00, 0x7f,
- -1, 0xa1, 0x00,
- -1, 0xa2, 0x00,
- -1, 0xb5, 0x00,
- -1, 0xab, 0x01,
- -1, 0xb1, 0x32,
- -1, 0xb4, 0xa0, 0xb5, 0x55,
- -1, 0xbb, 0x17,
- -1, 0xbe, 0x05,
- -1, 0xc1, 0xc8, 0x80, 0xc8,
- -1, 0xc7, 0x0f,
- -1, 0xb6, 0x01,
- -1, 0xa6,
- -1, 0xaf,
- -3
-};
-
-/**
- * struct flexfb_lcd_controller - Describes the LCD controller properties
- * @name: Model name of the chip
- * @width: Width of display in pixels
- * @height: Height of display in pixels
- * @setaddrwin: Which set_addr_win() implementation to use
- * @regwidth: LCD Controller Register width in bits
- * @init_seq: LCD initialization sequence
- * @init_seq_sz: Size of LCD initialization sequence
- */
-struct flexfb_lcd_controller {
- const char *name;
- unsigned int width;
- unsigned int height;
- unsigned int setaddrwin;
- unsigned int regwidth;
- const s16 *init_seq;
- int init_seq_sz;
-};
-
-static const struct flexfb_lcd_controller flexfb_chip_table[] = {
- {
- .name = "st7735r",
- .width = 120,
- .height = 160,
- .init_seq = st7735r_init,
- .init_seq_sz = ARRAY_SIZE(st7735r_init),
- },
- {
- .name = "hx8340bn",
- .width = 176,
- .height = 220,
- .init_seq = hx8340bn_init,
- .init_seq_sz = ARRAY_SIZE(hx8340bn_init),
- },
- {
- .name = "ili9225",
- .width = 176,
- .height = 220,
- .regwidth = 16,
- .init_seq = ili9225_init,
- .init_seq_sz = ARRAY_SIZE(ili9225_init),
- },
- {
- .name = "ili9320",
- .width = 240,
- .height = 320,
- .setaddrwin = 1,
- .regwidth = 16,
- .init_seq = ili9320_init,
- .init_seq_sz = ARRAY_SIZE(ili9320_init),
- },
- {
- .name = "ili9325",
- .width = 240,
- .height = 320,
- .setaddrwin = 1,
- .regwidth = 16,
- .init_seq = ili9325_init,
- .init_seq_sz = ARRAY_SIZE(ili9325_init),
- },
- {
- .name = "ili9341",
- .width = 240,
- .height = 320,
- .init_seq = ili9341_init,
- .init_seq_sz = ARRAY_SIZE(ili9341_init),
- },
- {
- .name = "ssd1289",
- .width = 240,
- .height = 320,
- .setaddrwin = 2,
- .regwidth = 16,
- .init_seq = ssd1289_init,
- .init_seq_sz = ARRAY_SIZE(ssd1289_init),
- },
- {
- .name = "ssd1351",
- .width = 128,
- .height = 128,
- .setaddrwin = 3,
- .init_seq = ssd1351_init,
- .init_seq_sz = ARRAY_SIZE(ssd1351_init),
- },
-};
-
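
Supporting another controller in this table amounted to providing a default init sequence plus one entry; a hypothetical example (every name and value below is invented for illustration):

	static const s16 mychip_init[] = {
		-1, 0x11, -2, 120,	/* exit sleep, wait 120 ms */
		-1, 0x29,		/* display on */
		-3,			/* end of sequence */
	};

	/* ...and the corresponding flexfb_chip_table[] entry: */
	{
		.name = "mychip",
		.width = 240,
		.height = 320,
		.init_seq = mychip_init,
		.init_seq_sz = ARRAY_SIZE(mychip_init),
	},
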
-/* ili9320, ili9325 */
-static void flexfb_set_addr_win_1(struct fbtft_par *par,
- int xs, int ys, int xe, int ye)
-{
- switch (par->info->var.rotate) {
- /* R20h = Horizontal GRAM Start Address */
- /* R21h = Vertical GRAM Start Address */
- case 0:
- write_reg(par, 0x0020, xs);
- write_reg(par, 0x0021, ys);
- break;
- case 180:
- write_reg(par, 0x0020, width - 1 - xs);
- write_reg(par, 0x0021, height - 1 - ys);
- break;
- case 270:
- write_reg(par, 0x0020, width - 1 - ys);
- write_reg(par, 0x0021, xs);
- break;
- case 90:
- write_reg(par, 0x0020, ys);
- write_reg(par, 0x0021, height - 1 - xs);
- break;
- }
- write_reg(par, 0x0022); /* Write Data to GRAM */
-}
-
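
A worked case for the 270-degree branch above, assuming a 240x320 panel (width=240, height=320): a window start of (xs, ys) = (10, 20) lands in the GRAM address registers as R20h = 240 - 1 - 20 = 219 and R21h = 10.
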
-/* ssd1289 */
-static void flexfb_set_addr_win_2(struct fbtft_par *par,
- int xs, int ys, int xe, int ye)
-{
- switch (par->info->var.rotate) {
- /* R4Eh - Set GDDRAM X address counter */
- /* R4Fh - Set GDDRAM Y address counter */
- case 0:
- write_reg(par, 0x4e, xs);
- write_reg(par, 0x4f, ys);
- break;
- case 180:
- write_reg(par, 0x4e, par->info->var.xres - 1 - xs);
- write_reg(par, 0x4f, par->info->var.yres - 1 - ys);
- break;
- case 270:
- write_reg(par, 0x4e, par->info->var.yres - 1 - ys);
- write_reg(par, 0x4f, xs);
- break;
- case 90:
- write_reg(par, 0x4e, ys);
- write_reg(par, 0x4f, par->info->var.xres - 1 - xs);
- break;
- }
-
- /* R22h - RAM data write */
- write_reg(par, 0x22, 0);
-}
-
-/* ssd1351 */
-static void set_addr_win_3(struct fbtft_par *par,
- int xs, int ys, int xe, int ye)
-{
- write_reg(par, 0x15, xs, xe);
- write_reg(par, 0x75, ys, ye);
- write_reg(par, 0x5C);
-}
-
-static int flexfb_verify_gpios_dc(struct fbtft_par *par)
-{
- fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
-
- if (!par->gpio.dc) {
- dev_err(par->info->device,
- "Missing info about 'dc' gpio. Aborting.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int flexfb_verify_gpios_db(struct fbtft_par *par)
-{
- int i;
- int num_db = buswidth;
-
- fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
-
- if (!par->gpio.dc) {
- dev_err(par->info->device, "Missing info about 'dc' gpio. Aborting.\n");
- return -EINVAL;
- }
- if (!par->gpio.wr) {
- dev_err(par->info->device, "Missing info about 'wr' gpio. Aborting.\n");
- return -EINVAL;
- }
- if (latched && !par->gpio.latch) {
- dev_err(par->info->device, "Missing info about 'latch' gpio. Aborting.\n");
- return -EINVAL;
- }
- if (latched)
- num_db = buswidth / 2;
- for (i = 0; i < num_db; i++) {
- if (!par->gpio.db[i]) {
- dev_err(par->info->device,
- "Missing info about 'db%02d' gpio. Aborting.\n",
- i);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static void flexfb_chip_load_param(const struct flexfb_lcd_controller *chip)
-{
- if (!width)
- width = chip->width;
- if (!height)
- height = chip->height;
- setaddrwin = chip->setaddrwin;
- if (chip->regwidth)
- regwidth = chip->regwidth;
- if (!init_num) {
- initp = chip->init_seq;
- initp_num = chip->init_seq_sz;
- }
-}
-
-static struct fbtft_display flex_display = { };
-
-static int flexfb_chip_init(const struct device *dev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(flexfb_chip_table); i++)
- if (!strcmp(chip, flexfb_chip_table[i].name)) {
- flexfb_chip_load_param(&flexfb_chip_table[i]);
- return 0;
- }
-
- dev_err(dev, "chip=%s is not supported\n", chip);
-
- return -EINVAL;
-}
-
-static int flexfb_probe_common(struct spi_device *sdev,
- struct platform_device *pdev)
-{
- struct device *dev;
- struct fb_info *info;
- struct fbtft_par *par;
- int ret;
-
- initp = init;
- initp_num = init_num;
-
- if (sdev)
- dev = &sdev->dev;
- else
- dev = &pdev->dev;
-
- fbtft_init_dbg(dev, "%s(%s)\n", __func__,
- sdev ? "'SPI device'" : "'Platform device'");
-
- if (chip) {
- ret = flexfb_chip_init(dev);
- if (ret)
- return ret;
- }
-
- if (width == 0 || height == 0) {
- dev_err(dev, "argument(s) missing: width and height has to be set.\n");
- return -EINVAL;
- }
- flex_display.width = width;
- flex_display.height = height;
- fbtft_init_dbg(dev, "Display resolution: %dx%d\n", width, height);
- fbtft_init_dbg(dev, "chip = %s\n", chip ? chip : "not set");
- fbtft_init_dbg(dev, "setaddrwin = %d\n", setaddrwin);
- fbtft_init_dbg(dev, "regwidth = %d\n", regwidth);
- fbtft_init_dbg(dev, "buswidth = %d\n", buswidth);
-
- info = fbtft_framebuffer_alloc(&flex_display, dev, dev->platform_data);
- if (!info)
- return -ENOMEM;
-
- par = info->par;
- if (sdev)
- par->spi = sdev;
- else
- par->pdev = pdev;
- if (!par->init_sequence)
- par->init_sequence = initp;
- par->fbtftops.init_display = fbtft_init_display;
-
- /* registerwrite functions */
- switch (regwidth) {
- case 8:
- par->fbtftops.write_register = fbtft_write_reg8_bus8;
- break;
- case 16:
- par->fbtftops.write_register = fbtft_write_reg16_bus8;
- break;
- default:
- dev_err(dev,
- "argument 'regwidth': %d is not supported.\n",
- regwidth);
- return -EINVAL;
- }
-
- /* bus functions */
- if (sdev) {
- par->fbtftops.write = fbtft_write_spi;
- switch (buswidth) {
- case 8:
- par->fbtftops.write_vmem = fbtft_write_vmem16_bus8;
- if (!par->startbyte)
- par->fbtftops.verify_gpios = flexfb_verify_gpios_dc;
- break;
- case 9:
- if (regwidth == 16) {
- dev_err(dev, "argument 'regwidth': %d is not supported with buswidth=%d and SPI.\n",
- regwidth, buswidth);
- return -EINVAL;
- }
- par->fbtftops.write_register = fbtft_write_reg8_bus9;
- par->fbtftops.write_vmem = fbtft_write_vmem16_bus9;
- if (par->spi->master->bits_per_word_mask
- & SPI_BPW_MASK(9)) {
- par->spi->bits_per_word = 9;
- break;
- }
-
- dev_warn(dev,
- "9-bit SPI not available, emulating using 8-bit.\n");
- /* allocate buffer with room for dc bits */
- par->extra = devm_kzalloc(par->info->device,
- par->txbuf.len
- + (par->txbuf.len / 8) + 8,
- GFP_KERNEL);
- if (!par->extra) {
- ret = -ENOMEM;
- goto out_release;
- }
- par->fbtftops.write = fbtft_write_spi_emulate_9;
-
- break;
- default:
- dev_err(dev,
- "argument 'buswidth': %d is not supported with SPI.\n",
- buswidth);
- return -EINVAL;
- }
- } else {
- par->fbtftops.verify_gpios = flexfb_verify_gpios_db;
- switch (buswidth) {
- case 8:
- par->fbtftops.write = fbtft_write_gpio8_wr;
- par->fbtftops.write_vmem = fbtft_write_vmem16_bus8;
- break;
- case 16:
- par->fbtftops.write_register = fbtft_write_reg16_bus16;
- if (latched)
- par->fbtftops.write = fbtft_write_gpio16_wr_latched;
- else
- par->fbtftops.write = fbtft_write_gpio16_wr;
- par->fbtftops.write_vmem = fbtft_write_vmem16_bus16;
- break;
- default:
- dev_err(dev,
- "argument 'buswidth': %d is not supported with parallel.\n",
- buswidth);
- return -EINVAL;
- }
- }
-
- /* set_addr_win function */
- switch (setaddrwin) {
- case 0:
- /* use default */
- break;
- case 1:
- par->fbtftops.set_addr_win = flexfb_set_addr_win_1;
- break;
- case 2:
- par->fbtftops.set_addr_win = flexfb_set_addr_win_2;
- break;
- case 3:
- par->fbtftops.set_addr_win = set_addr_win_3;
- break;
- default:
- dev_err(dev, "argument 'setaddrwin': unknown value %d.\n",
- setaddrwin);
- return -EINVAL;
- }
-
- if (!nobacklight)
- par->fbtftops.register_backlight = fbtft_register_backlight;
-
- ret = fbtft_register_framebuffer(info);
- if (ret < 0)
- goto out_release;
-
- return 0;
-
-out_release:
- fbtft_framebuffer_release(info);
-
- return ret;
-}
-
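
On the 8-bit emulation path above, every 9-bit word (one D/C bit plus eight data bits) has to be repacked, and eight such words occupy exactly nine bytes on the wire, which is why the extra buffer is sized at len + len/8 + 8. A sketch of the packing arithmetic (an assumption about the helper's approach, not the exact fbtft_write_spi_emulate_9 code):

	/* Pack 9-bit words (D/C bit, then data MSB-first) into bytes. */
	static void pack9_sketch(const u8 *data, size_t len, int dc, u8 *out)
	{
		unsigned int acc = 0, nbits = 0;
		size_t n, o = 0;

		for (n = 0; n < len; n++) {
			acc = (acc << 9) | ((dc & 1) << 8) | data[n];
			nbits += 9;
			while (nbits >= 8) {	/* flush whole bytes */
				nbits -= 8;
				out[o++] = (acc >> nbits) & 0xff;
			}
		}
	}
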
-static int flexfb_remove_common(struct device *dev, struct fb_info *info)
-{
- struct fbtft_par *par;
-
- if (!info)
- return -EINVAL;
- par = info->par;
- if (par)
- fbtft_par_dbg(DEBUG_DRIVER_INIT_FUNCTIONS, par, "%s()\n",
- __func__);
- fbtft_unregister_framebuffer(info);
- fbtft_framebuffer_release(info);
-
- return 0;
-}
-
-static int flexfb_probe_spi(struct spi_device *spi)
-{
- return flexfb_probe_common(spi, NULL);
-}
-
-static int flexfb_remove_spi(struct spi_device *spi)
-{
- struct fb_info *info = spi_get_drvdata(spi);
-
- return flexfb_remove_common(&spi->dev, info);
-}
-
-static int flexfb_probe_pdev(struct platform_device *pdev)
-{
- return flexfb_probe_common(NULL, pdev);
-}
-
-static int flexfb_remove_pdev(struct platform_device *pdev)
-{
- struct fb_info *info = platform_get_drvdata(pdev);
-
- return flexfb_remove_common(&pdev->dev, info);
-}
-
-static struct spi_driver flexfb_spi_driver = {
- .driver = {
- .name = DRVNAME,
- },
- .probe = flexfb_probe_spi,
- .remove = flexfb_remove_spi,
-};
-
-static const struct platform_device_id flexfb_platform_ids[] = {
- { "flexpfb", 0 },
- { },
-};
-MODULE_DEVICE_TABLE(platform, flexfb_platform_ids);
-
-static struct platform_driver flexfb_platform_driver = {
- .driver = {
- .name = DRVNAME,
- },
- .id_table = flexfb_platform_ids,
- .probe = flexfb_probe_pdev,
- .remove = flexfb_remove_pdev,
-};
-
-static int __init flexfb_init(void)
-{
- int ret, ret2;
-
- ret = spi_register_driver(&flexfb_spi_driver);
- ret2 = platform_driver_register(&flexfb_platform_driver);
- if (ret < 0)
- return ret;
- return ret2;
-}
-
-static void __exit flexfb_exit(void)
-{
- spi_unregister_driver(&flexfb_spi_driver);
- platform_driver_unregister(&flexfb_platform_driver);
-}
-
-/* ------------------------------------------------------------------------- */
-
-module_init(flexfb_init);
-module_exit(flexfb_exit);
-
-MODULE_DESCRIPTION("Generic FB driver for TFT LCD displays");
-MODULE_AUTHOR("Noralf Tronnes");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index a62057555d1b..83469061a542 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -261,11 +261,11 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
/* Build the PKO buffer pointer */
hw_buffer.u64 = 0;
if (skb_shinfo(skb)->nr_frags == 0) {
- hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
+ hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
hw_buffer.s.pool = 0;
hw_buffer.s.size = skb->len;
} else {
- hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
+ hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
hw_buffer.s.pool = 0;
hw_buffer.s.size = skb_headlen(skb);
CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
@@ -273,11 +273,12 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
skb_frag_t *fs = skb_shinfo(skb)->frags + i;
hw_buffer.s.addr =
- XKPHYS_TO_PHYS((u64)skb_frag_address(fs));
+ XKPHYS_TO_PHYS((uintptr_t)skb_frag_address(fs));
hw_buffer.s.size = skb_frag_size(fs);
CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
}
- hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
+ hw_buffer.s.addr =
+ XKPHYS_TO_PHYS((uintptr_t)CVM_OCT_SKB_CB(skb));
hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
pko_command.s.gather = 1;
diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h
index a4ac3bfb62a8..b78ce9eaab85 100644
--- a/drivers/staging/octeon/octeon-stubs.h
+++ b/drivers/staging/octeon/octeon-stubs.h
@@ -1202,7 +1202,7 @@ static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work)
static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
{
- return (void *)(physical_address);
+ return (void *)(uintptr_t)(physical_address);
}
static inline uint64_t cvmx_ptr_to_phys(void *ptr)
diff --git a/drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c b/drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c
index 9ddd51685063..5792f491b59a 100644
--- a/drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c
+++ b/drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c
@@ -409,7 +409,7 @@ static int odm_ARFBRefresh_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_inf
pRaInfo->PTModeSS = 3;
else if (pRaInfo->HighestRate > 0x0b)
pRaInfo->PTModeSS = 2;
- else if (pRaInfo->HighestRate > 0x0b)
+ else if (pRaInfo->HighestRate > 0x03)
pRaInfo->PTModeSS = 1;
else
pRaInfo->PTModeSS = 0;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 664d93a7f90d..4fac9dca798e 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -348,8 +348,10 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
}
padapter->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
- if (!padapter->HalData)
- DBG_88E("cant not alloc memory for HAL DATA\n");
+ if (!padapter->HalData) {
+ DBG_88E("Failed to allocate memory for HAL data\n");
+ goto free_adapter;
+ }
/* step read_chip_version */
rtw_hal_read_chip_version(padapter);
diff --git a/drivers/staging/speakup/sysfs-driver-speakup b/drivers/staging/speakup/sysfs-driver-speakup
new file mode 100644
index 000000000000..be3f5d6962e9
--- /dev/null
+++ b/drivers/staging/speakup/sysfs-driver-speakup
@@ -0,0 +1,369 @@
+What: /sys/accessibility/speakup/attrib_bleep
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Beeps the PC speaker when there is an attribute change such as
+ foreground or background color when using speakup review
+ commands. One = on, zero = off.
+
+What: /sys/accessibility/speakup/bell_pos
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: This works much like a typewriter bell. If for example 72 is
+ echoed to bell_pos, it will beep the PC speaker when typing on
+ a line past character 72.
+
+What: /sys/accessibility/speakup/bleeps
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: This controls whether one hears beeps through the PC speaker
+ when using speakup's review commands.
+ TODO: what values does it accept?
+
+What: /sys/accessibility/speakup/bleep_time
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: This controls the duration of the PC speaker beeps speakup
+ produces.
+ TODO: What are the units? Jiffies?
+
+What: /sys/accessibility/speakup/cursor_time
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: This controls cursor delay when using arrow keys. On a very
+ slow connection, with the default setting, speakup may say the
+ wrong characters when moving with the arrows or backspacing.
+ Set this to a higher value to adjust for the delay and get
+ better synchronisation between cursor position and speech.
+
+What: /sys/accessibility/speakup/delimiters
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Delimit a word from speakup.
+ TODO: add more info
+
+What: /sys/accessibility/speakup/ex_num
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: TODO:
+
+What: /sys/accessibility/speakup/key_echo
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Controls if speakup speaks keys when they are typed. One = on,
+ zero = off or don't echo keys.
+
+What: /sys/accessibility/speakup/keymap
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Speakup keymap remaps keys to Speakup functions. It uses a
+ binary format. A special program called genmap is needed to
+ compile a textual keymap into the binary format which is then
+ loaded into /sys/accessibility/speakup/keymap.
+
+What: /sys/accessibility/speakup/no_interrupt
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Controls if typing interrupts output from speakup. With
+ no_interrupt set to zero, typing on the keyboard will interrupt
+ speakup if, for example, the say screen command is used before
+ the entire screen is read. With no_interrupt set to one, if the
+ say screen command is used and one then types on the keyboard,
+ speakup will continue to say the whole screen until it
+ finishes.
+
+What: /sys/accessibility/speakup/punc_all
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: This is a list of all the punctuation speakup should speak when
+ punc_level is set to four.
+
+What: /sys/accessibility/speakup/punc_level
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Controls the level of punctuation spoken as the screen is
+ displayed, not reviewed. Levels range from zero (no
+ punctuation) to four (all punctuation). One corresponds to
+ punc_some, two corresponds to punc_most, and three as well as
+ four both correspond to punc_all. Some hardware synthesizers
+ may have distinct levels corresponding to three and four for
+ punc_level. Also note that if punc_level is set to zero, and
+ key_echo is set to one, typed punctuation is still spoken as it
+ is typed.
+
+What: /sys/accessibility/speakup/punc_most
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: This is a list of all the punctuation speakup should speak when
+ punc_level is set to two.
+
+What: /sys/accessibility/speakup/punc_some
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: This is a list of all the punctuation speakup should speak when
+ punc_level is set to one.
+
+What: /sys/accessibility/speakup/reading_punc
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Almost the same as punc_level, the differences being that
+ reading_punc controls the level of punctuation when reviewing
+ the screen with speakup's screen review commands. The other
+ difference is that reading_punc set to three speaks punc_all,
+ and reading_punc set to four speaks all punctuation, including
+ spaces.
+
+What: /sys/accessibility/speakup/repeats
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: A list of characters speakup repeats. Normally, when there are
+ more than three characters in a row, speakup just reads three
+ of those characters. For example, "......" would be read as
+ dot, dot, dot. If a . is added to the list of characters in
+ repeats, "......" would be read as dot, dot, dot, times six.
+
+What: /sys/accessibility/speakup/say_control
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: If set to one, speakup speaks shift, alt and control when those
+ keys are pressed. If say_control is set to zero, shift, ctrl,
+ and alt are not spoken when they are pressed.
+
+What: /sys/accessibility/speakup/say_word_ctl
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: TODO:
+
+What: /sys/accessibility/speakup/silent
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: TODO:
+
+What: /sys/accessibility/speakup/spell_delay
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: This controls how fast a word is spelled when speakup's say
+ word review command is pressed twice quickly to speak the
+ current word being reviewed. Zero just speaks the letters one
+ after another, while values one through four seem to introduce
+ more of a pause between the spelling of each letter by speakup.
+
+What: /sys/accessibility/speakup/synth
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Gets or sets the synthesizer driver currently in use. Reading
+ synth returns the synthesizer driver currently in use. Writing
+ synth switches to the given synthesizer driver, provided it is
+ either built into the kernel, or already loaded as a module.
+
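+
+ As a concrete illustration of switching drivers through this
+ file (a hedged userspace sketch, equivalent to running
+ `echo soft > /sys/accessibility/speakup/synth`; error handling
+ kept minimal, and the helper name is invented):
+
+	#include <fcntl.h>
+	#include <string.h>
+	#include <unistd.h>
+
+	static int speakup_set_synth(const char *name)
+	{
+		int fd = open("/sys/accessibility/speakup/synth", O_WRONLY);
+		int ok;
+
+		if (fd < 0)
+			return -1;
+		ok = write(fd, name, strlen(name)) == (ssize_t)strlen(name);
+		close(fd);
+		return ok ? 0 : -1;
+	}
+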
+What: /sys/accessibility/speakup/synth_direct
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Sends whatever is written to synth_direct directly to the
+ speech synthesizer in use, bypassing speakup. This could be
+ used to make the synthesizer speak a string, or to send control
+ sequences to the synthesizer to change how the synthesizer
+ behaves.
+
+What: /sys/accessibility/speakup/version
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Reading version returns the version of speakup, and the version
+ of the synthesizer driver currently in use.
+
+What: /sys/accessibility/speakup/i18n/announcements
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: This file contains various general announcements, most of which
+ cannot be categorized. You will find messages such as "You
+ killed Speakup", "I'm alive", "leaving help", "parked",
+ "unparked", and others. You will also find the names of the
+ screen edges and cursor tracking modes here.
+
+What: /sys/accessibility/speakup/i18n/chartab
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: TODO
+
+What: /sys/accessibility/speakup/i18n/ctl_keys
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Here, you will find names of control keys. These are used with
+ Speakup's say_control feature.
+
+What: /sys/accessibility/speakup/i18n/function_names
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Here, you will find a list of names for Speakup functions.
+ These are used by the help system. For example, suppose that
+ you have activated help mode, and you pressed keypad 3. Speakup
+ says: "keypad 3 is character, say next." The message
+ "character, say next" names a Speakup function, and it comes
+ from this function_names file.
+
+What: /sys/accessibility/speakup/i18n/states
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: This file contains names for key states.
+ Again, these are part of the help system. For instance, if you
+ had pressed speakup + keypad 3, you would hear:
+ "speakup keypad 3 is go to bottom edge."
+ The speakup key is depressed, so the name of the key state is
+ speakup.
+ This part of the message comes from the states collection.
+
+What: /sys/accessibility/speakup/i18n/characters
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Through this sys entry, Speakup gives you the ability to change
+ how Speakup pronounces a given character. You could, for
+ example, change how some punctuation characters are spoken. You
+ can even change how Speakup will pronounce certain letters. For
+ further details see '12. Changing the Pronunciation of
+ Characters' in Speakup User's Guide (file spkguide.txt in
+ source).
+
+What: /sys/accessibility/speakup/i18n/colors
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: When you use the "say attributes" function, Speakup says the
+ name of the foreground and background colors. These names come
+ from the i18n/colors file.
+
+What: /sys/accessibility/speakup/i18n/formatted
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: This group of messages contains embedded formatting codes, to
+ specify the type and width of displayed data. If you change
+ these, you must preserve all of the formatting codes, and they
+ must appear in the order used by the default messages.
+
+What: /sys/accessibility/speakup/i18n/key_names
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Again, key_names is used by Speakup's help system. In the
+ previous example, Speakup said that you pressed "keypad 3."
+ This name came from the key_names file.
+
+What: /sys/accessibility/speakup/<synth-name>/
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: In `/sys/accessibility/speakup` is a directory corresponding to
+ the synthesizer driver currently in use, e.g. `soft` for the
+ soft driver. This directory contains files which control the
+ speech synthesizer itself, as opposed to controlling the
+ speakup screen reader. The parameters in this directory have
+ the same names and functions across all supported synthesizers.
+ The range of values for freq, pitch, rate, and vol is the same
+ for all supported synthesizers, with the given range being
+ internally mapped by the driver to more or less fit the range
+ of values supported for a given parameter by the individual
+ synthesizer. Below is a description of values and parameters
+ for the soft synthesizer, which is currently the most commonly
+ used.
+
+What: /sys/accessibility/speakup/soft/caps_start
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: This is the string that is sent to the synthesizer to cause it
+ to start speaking uppercase letters. For the soft synthesizer
+ and most others, this causes the pitch of the voice to rise
+ above the currently set pitch.
+
+What: /sys/accessibility/speakup/soft/caps_stop
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	This is the string sent to the synthesizer to cause it to stop
+		speaking uppercase letters. In the case of the soft synthesizer
+		and most others, this returns the pitch of the voice to the
+		currently set pitch.
+
+What: /sys/accessibility/speakup/soft/delay_time
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: TODO:
+
+What: /sys/accessibility/speakup/soft/direct
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	Controls whether punctuation is spoken by speakup or by the
+		synthesizer. For example, speakup speaks ">" as "greater",
+		while the espeak synthesizer used by the soft driver speaks
+		"greater than". Zero lets speakup speak the punctuation. One
+		lets the synthesizer itself speak punctuation.
+
+What: /sys/accessibility/speakup/soft/freq
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Gets or sets the frequency of the speech synthesizer. Range is
+ 0-9.
+
+What: /sys/accessibility/speakup/soft/full_time
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: TODO:
+
+What: /sys/accessibility/speakup/soft/jiffy_delta
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	This controls how many jiffies the kernel gives to the
+ synthesizer. Setting this too high can make a system unstable,
+ or even crash it.
+
+What: /sys/accessibility/speakup/soft/pitch
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Gets or sets the pitch of the synthesizer. The range is 0-9.
+
+What: /sys/accessibility/speakup/soft/punct
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Gets or sets the amount of punctuation spoken by the
+ synthesizer. The range for the soft driver seems to be 0-2.
+		TODO: How is this related to speakup's punc_level or
+		reading_punc?
+
+What: /sys/accessibility/speakup/soft/rate
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	Gets or sets the rate of the synthesizer. Range is from zero
+		(slowest) to nine (fastest).
+
+What: /sys/accessibility/speakup/soft/tone
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Gets or sets the tone of the speech synthesizer. The range for
+ the soft driver seems to be 0-2. This seems to make no
+ difference if using espeak and the espeakup connector.
+ TODO: does espeakup support different tonalities?
+
+What: /sys/accessibility/speakup/soft/trigger_time
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: TODO:
+
+What: /sys/accessibility/speakup/soft/voice
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Gets or sets the voice used by the synthesizer if the
+ synthesizer can speak in more than one voice. The range for the
+ soft driver is 0-7. Note that while espeak supports multiple
+ voices, this parameter will not set the voice when the espeakup
+ connector is used between speakup and espeak.
+
+What: /sys/accessibility/speakup/soft/vol
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	Gets or sets the volume of the speech synthesizer. Range is
+		0-9, with zero being the softest and nine the loudest.
+
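For orientation, here is a minimal user-space sketch of driving one of the attributes documented above. The path and value are illustrative, and the program assumes the soft driver is loaded; writing an attribute is just an ordinary file write.

#include <stdio.h>

int main(void)
{
	/* Illustrative path; present only when the soft driver is in use. */
	const char *attr = "/sys/accessibility/speakup/soft/rate";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Rate ranges from 0 (slowest) to 9 (fastest), per the entry above. */
	fputs("5", f);
	return fclose(f) ? 1 : 0;
}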
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
index bc1eaa3a0773..826016c3431a 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
@@ -12,7 +12,7 @@
static const struct snd_pcm_hardware snd_bcm2835_playback_hw = {
.info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_DRAIN_TRIGGER | SNDRV_PCM_INFO_SYNC_APPLPTR),
+ SNDRV_PCM_INFO_SYNC_APPLPTR),
.formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
.rate_min = 8000,
@@ -29,7 +29,7 @@ static const struct snd_pcm_hardware snd_bcm2835_playback_hw = {
static const struct snd_pcm_hardware snd_bcm2835_playback_spdif_hw = {
.info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_DRAIN_TRIGGER | SNDRV_PCM_INFO_SYNC_APPLPTR),
+ SNDRV_PCM_INFO_SYNC_APPLPTR),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000,
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index 23fba01107b9..c6f9cf1913d2 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -289,6 +289,7 @@ int bcm2835_audio_stop(struct bcm2835_alsa_stream *alsa_stream)
VC_AUDIO_MSG_TYPE_STOP, false);
}
+/* FIXME: this doesn't seem to work as expected for "draining" */
int bcm2835_audio_drain(struct bcm2835_alsa_stream *alsa_stream)
{
struct vc_audio_msg m = {
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index c6bb4aaf9bd0..082302944c37 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1748,8 +1748,10 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
priv->hw->max_signal = 100;
- if (vnt_init(priv))
+ if (vnt_init(priv)) {
+ device_free_info(priv);
return -ENODEV;
+ }
device_print_info(priv);
pci_set_drvdata(pcid, priv);
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index e55c79eb6430..98361acd3053 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -968,6 +968,11 @@ static int __init n_hdlc_init(void)
} /* end of init_module() */
+#ifdef CONFIG_SPARC
+#undef __exitdata
+#define __exitdata
+#endif
+
static const char hdlc_unregister_ok[] __exitdata =
KERN_INFO "N_HDLC: line discipline unregistered\n";
static const char hdlc_unregister_fail[] __exitdata =
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index c68e2b3a1634..836e736ae188 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -141,7 +141,7 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
serial8250_do_set_mctrl(port, mctrl);
- if (!up->gpios) {
+ if (!mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS)) {
/*
* Turn off autoRTS if RTS is lowered and restore autoRTS
* setting if RTS is raised
@@ -456,7 +456,8 @@ static void omap_8250_set_termios(struct uart_port *port,
up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF);
if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW &&
- !up->gpios) {
+ !mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS) &&
+ !mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_CTS)) {
/* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */
up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
priv->efr |= UART_EFR_CTS;
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 4789b5d62f63..67a9eb3f94ce 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1032,6 +1032,7 @@ config SERIAL_SIFIVE_CONSOLE
bool "Console on SiFive UART"
depends on SERIAL_SIFIVE=y
select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
help
Select this option if you would like to use a SiFive UART as the
system console.
diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c
index 68d74f2b5106..a32f0d2afd59 100644
--- a/drivers/tty/serial/fsl_linflexuart.c
+++ b/drivers/tty/serial/fsl_linflexuart.c
@@ -3,7 +3,7 @@
* Freescale linflexuart serial port driver
*
* Copyright 2012-2016 Freescale Semiconductor, Inc.
- * Copyright 2017-2018 NXP
+ * Copyright 2017-2019 NXP
*/
#if defined(CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE) && \
@@ -246,12 +246,14 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
struct tty_port *port = &sport->state->port;
unsigned long flags, status;
unsigned char rx;
+ bool brk;
spin_lock_irqsave(&sport->lock, flags);
status = readl(sport->membase + UARTSR);
while (status & LINFLEXD_UARTSR_RMB) {
rx = readb(sport->membase + BDRM);
+ brk = false;
flg = TTY_NORMAL;
sport->icount.rx++;
@@ -261,8 +263,11 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
status |= LINFLEXD_UARTSR_SZF;
if (status & LINFLEXD_UARTSR_BOF)
status |= LINFLEXD_UARTSR_BOF;
- if (status & LINFLEXD_UARTSR_FEF)
+ if (status & LINFLEXD_UARTSR_FEF) {
+ if (!rx)
+ brk = true;
status |= LINFLEXD_UARTSR_FEF;
+ }
if (status & LINFLEXD_UARTSR_PE)
status |= LINFLEXD_UARTSR_PE;
}
@@ -271,13 +276,15 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
sport->membase + UARTSR);
status = readl(sport->membase + UARTSR);
- if (uart_handle_sysrq_char(sport, (unsigned char)rx))
- continue;
-
+ if (brk) {
+ uart_handle_break(sport);
+ } else {
#ifdef SUPPORT_SYSRQ
- sport->sysrq = 0;
+ if (uart_handle_sysrq_char(sport, (unsigned char)rx))
+ continue;
#endif
- tty_insert_flip_char(port, rx, flg);
+ tty_insert_flip_char(port, rx, flg);
+ }
}
spin_unlock_irqrestore(&sport->lock, flags);
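The rxint change above treats a framing error on an all-zero data byte as a line break and routes it to uart_handle_break() instead of inserting a bad character into the flip buffer. A standalone sketch of that classification rule follows; the bit definition is an illustrative stand-in for LINFLEXD_UARTSR_FEF, not the driver's real register layout.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative framing-error flag; the real bit position lives in the
 * driver's register definitions.
 */
#define SR_FEF (1u << 2)

/* A framing error on an all-zero byte is the signature of a break
 * condition; a framing error on a non-zero byte is just a bad frame.
 */
static bool rx_is_break(uint32_t status, uint8_t rx)
{
	return (status & SR_FEF) && rx == 0;
}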
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 3e17bb8a0b16..537896c4d887 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -548,7 +548,7 @@ static void lpuart_flush_buffer(struct uart_port *port)
val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
lpuart32_write(&sport->port, val, UARTFIFO);
} else {
- val = readb(sport->port.membase + UARTPFIFO);
+ val = readb(sport->port.membase + UARTCFIFO);
val |= UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH;
writeb(val, sport->port.membase + UARTCFIFO);
}
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 87c58f9f6390..5e08f2657b90 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2222,8 +2222,8 @@ static int imx_uart_probe(struct platform_device *pdev)
return PTR_ERR(base);
rxirq = platform_get_irq(pdev, 0);
- txirq = platform_get_irq(pdev, 1);
- rtsirq = platform_get_irq(pdev, 2);
+ txirq = platform_get_irq_optional(pdev, 1);
+ rtsirq = platform_get_irq_optional(pdev, 2);
sport->port.dev = &pdev->dev;
sport->port.mapbase = res->start;
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index 03963af77b15..d2d8b3494685 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -740,7 +740,7 @@ static int __init owl_uart_init(void)
return ret;
}
-static void __init owl_uart_exit(void)
+static void __exit owl_uart_exit(void)
{
platform_driver_unregister(&owl_uart_platform_driver);
uart_unregister_driver(&owl_uart_driver);
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index c1b0d7662ef9..ff9a27d48bca 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -815,7 +815,7 @@ static int __init rda_uart_init(void)
return ret;
}
-static void __init rda_uart_exit(void)
+static void __exit rda_uart_exit(void)
{
platform_driver_unregister(&rda_uart_platform_driver);
uart_unregister_driver(&rda_uart_driver);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 6e713be1d4e9..c4a414a46c7f 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1964,8 +1964,10 @@ uart_get_console(struct uart_port *ports, int nr, struct console *co)
* console=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
*
* The optional form
+ *
* earlycon=<name>,0x<addr>,<options>
* console=<name>,0x<addr>,<options>
+ *
* is also accepted; the returned @iotype will be UPIO_MEM.
*
* Returns 0 on success or -EINVAL on failure
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index d9074303c88e..fb4781292d40 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -66,6 +66,9 @@ EXPORT_SYMBOL_GPL(mctrl_gpio_set);
struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
enum mctrl_gpio_idx gidx)
{
+ if (gpios == NULL)
+ return NULL;
+
return gpios->gpio[gidx];
}
EXPORT_SYMBOL_GPL(mctrl_gpio_to_gpiod);
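With this guard in place, mctrl_gpio_to_gpiod() is safe to call even when a driver never allocated a GPIO table, which is exactly what the 8250_omap hunks above rely on. The general pattern, with illustrative types rather than the kernel API, is a NULL-tolerant accessor: callers get NULL back both when the table is absent and when the requested line is absent, so a single check suffices.

#include <stddef.h>

struct line_table {
	void *line[4];
};

/* NULL-tolerant accessor: returns NULL for a missing table as well as
 * for a missing line, so callers need no separate table check.
 */
static void *table_get_line(struct line_table *t, unsigned int idx)
{
	if (t == NULL)
		return NULL;
	return t->line[idx];
}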
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 4e754a4850e6..22e5d4e13714 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2894,8 +2894,12 @@ static int sci_init_single(struct platform_device *dev,
port->mapbase = res->start;
sci_port->reg_size = resource_size(res);
- for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i)
- sci_port->irqs[i] = platform_get_irq(dev, i);
+ for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i) {
+ if (i)
+ sci_port->irqs[i] = platform_get_irq_optional(dev, i);
+ else
+ sci_port->irqs[i] = platform_get_irq(dev, i);
+ }
/* The SCI generates several interrupts. They can be muxed together or
* connected to different interrupt lines. In the muxed case only one
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index b8b912b5a8b9..06e79c11141d 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -897,7 +897,8 @@ static int __init ulite_init(void)
static void __exit ulite_exit(void)
{
platform_driver_unregister(&ulite_platform_driver);
- uart_unregister_driver(&ulite_uart_driver);
+ if (ulite_uart_driver.state)
+ uart_unregister_driver(&ulite_uart_driver);
}
module_init(ulite_init);
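The exit-path guard above only unregisters the uart driver when its state pointer shows that registration actually happened. A hedged sketch of the same idiom, with illustrative names and a stub standing in for the real teardown:

#include <stddef.h>

struct uart_drv {
	void *state;	/* non-NULL only after successful registration */
};

static void drv_unregister(struct uart_drv *d)
{
	d->state = NULL;	/* stub standing in for the real teardown */
}

static void drv_exit(struct uart_drv *d)
{
	/* Registration can fail or be skipped entirely; unregistering an
	 * unregistered driver would be a bug, so check first.
	 */
	if (d->state)
		drv_unregister(d);
}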
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index da4563aaaf5c..4e55bc327a54 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1550,7 +1550,6 @@ static int cdns_uart_probe(struct platform_device *pdev)
goto err_out_id;
}
- uartps_major = cdns_uart_uart_driver->tty_driver->major;
cdns_uart_data->cdns_uart_driver = cdns_uart_uart_driver;
/*
@@ -1680,6 +1679,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
console_port = NULL;
#endif
+ uartps_major = cdns_uart_uart_driver->tty_driver->major;
cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node,
"cts-override");
return 0;
@@ -1741,6 +1741,12 @@ static int cdns_uart_remove(struct platform_device *pdev)
console_port = NULL;
#endif
+	/* If this is the last instance, reset the major number to zero */
+ mutex_lock(&bitmap_lock);
+ if (bitmap_empty(bitmap, MAX_UART_INSTANCES))
+ uartps_major = 0;
+ mutex_unlock(&bitmap_lock);
+
uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
return rc;
}
diff --git a/drivers/usb/cdns3/cdns3-pci-wrap.c b/drivers/usb/cdns3/cdns3-pci-wrap.c
index c41ddb61b857..b0a29efe7d31 100644
--- a/drivers/usb/cdns3/cdns3-pci-wrap.c
+++ b/drivers/usb/cdns3/cdns3-pci-wrap.c
@@ -159,8 +159,9 @@ static int cdns3_pci_probe(struct pci_dev *pdev,
wrap->plat_dev = platform_device_register_full(&plat_info);
if (IS_ERR(wrap->plat_dev)) {
pci_disable_device(pdev);
+ err = PTR_ERR(wrap->plat_dev);
kfree(wrap);
- return PTR_ERR(wrap->plat_dev);
+ return err;
}
}
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index 06f1e105be4e..1109dc5a4c39 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -160,10 +160,28 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
if (ret)
goto err;
- if (cdns->dr_mode != USB_DR_MODE_OTG) {
+ /* Initialize idle role to start with */
+ ret = cdns3_role_start(cdns, USB_ROLE_NONE);
+ if (ret)
+ goto err;
+
+ switch (cdns->dr_mode) {
+ case USB_DR_MODE_UNKNOWN:
+ case USB_DR_MODE_OTG:
ret = cdns3_hw_role_switch(cdns);
if (ret)
goto err;
+ break;
+ case USB_DR_MODE_PERIPHERAL:
+ ret = cdns3_role_start(cdns, USB_ROLE_DEVICE);
+ if (ret)
+ goto err;
+ break;
+ case USB_DR_MODE_HOST:
+ ret = cdns3_role_start(cdns, USB_ROLE_HOST);
+ if (ret)
+ goto err;
+ break;
}
return ret;
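The hunk above replaces a single OTG check with an explicit policy: start in the idle role, then move to the role implied by dr_mode. A self-contained sketch of that dispatch, with illustrative enum and helper names standing in for the driver's:

enum dr_mode { MODE_UNKNOWN, MODE_OTG, MODE_PERIPHERAL, MODE_HOST };
enum usb_role { ROLE_NONE, ROLE_DEVICE, ROLE_HOST };

static int role_start(enum usb_role role)
{
	(void)role;
	return 0;	/* stub: the real driver programs the controller */
}

static int hw_role_switch(void)
{
	return role_start(ROLE_HOST);	/* stub: hardware picks the role */
}

static int init_role(enum dr_mode mode)
{
	int ret = role_start(ROLE_NONE);	/* always start idle */

	if (ret)
		return ret;

	switch (mode) {
	case MODE_UNKNOWN:	/* treated like OTG: let hardware decide */
	case MODE_OTG:
		return hw_role_switch();
	case MODE_PERIPHERAL:
		return role_start(ROLE_DEVICE);
	case MODE_HOST:
		return role_start(ROLE_HOST);
	}
	return 0;
}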
diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c
index 44f652e8b5a2..e71240b386b4 100644
--- a/drivers/usb/cdns3/ep0.c
+++ b/drivers/usb/cdns3/ep0.c
@@ -234,9 +234,11 @@ static int cdns3_req_ep0_set_address(struct cdns3_device *priv_dev,
static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev,
struct usb_ctrlrequest *ctrl)
{
+ struct cdns3_endpoint *priv_ep;
__le16 *response_pkt;
u16 usb_status = 0;
u32 recip;
+ u8 index;
recip = ctrl->bRequestType & USB_RECIP_MASK;
@@ -262,9 +264,13 @@ static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev,
case USB_RECIP_INTERFACE:
return cdns3_ep0_delegate_req(priv_dev, ctrl);
case USB_RECIP_ENDPOINT:
- /* check if endpoint is stalled */
+ index = cdns3_ep_addr_to_index(ctrl->wIndex);
+ priv_ep = priv_dev->eps[index];
+
+ /* check if endpoint is stalled or stall is pending */
cdns3_select_ep(priv_dev, ctrl->wIndex);
- if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts)))
+ if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts)) ||
+ (priv_ep->flags & EP_STALL_PENDING))
usb_status = BIT(USB_ENDPOINT_HALT);
break;
default:
@@ -332,7 +338,7 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev,
* for sending status stage.
 * This time should be less than 3ms.
*/
- usleep_range(1000, 2000);
+ mdelay(1);
cdns3_set_register_bit(&priv_dev->regs->usb_cmd,
USB_CMD_STMODE |
USB_STS_TMODE_SEL(tmode - 1));
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
index 228cdc4ab886..2ca280f4c054 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/gadget.c
@@ -2571,6 +2571,7 @@ static int cdns3_gadget_start(struct cdns3 *cdns)
switch (max_speed) {
case USB_SPEED_FULL:
writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
+ writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
break;
case USB_SPEED_HIGH:
writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
@@ -2662,6 +2663,13 @@ static int __cdns3_gadget_init(struct cdns3 *cdns)
{
int ret = 0;
+ /* Ensure 32-bit DMA Mask in case we switched back from Host mode */
+ ret = dma_set_mask_and_coherent(cdns->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(cdns->dev, "Failed to set dma mask: %d\n", ret);
+ return ret;
+ }
+
cdns3_drd_switch_gadget(cdns, 1);
pm_runtime_get_sync(cdns->dev);
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 7fea4999d352..fb8bd60c83f4 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -461,10 +461,12 @@ static int usblp_release(struct inode *inode, struct file *file)
mutex_lock(&usblp_mutex);
usblp->used = 0;
- if (usblp->present) {
+ if (usblp->present)
usblp_unlink_urbs(usblp);
- usb_autopm_put_interface(usblp->intf);
- } else /* finish cleanup from disconnect */
+
+ usb_autopm_put_interface(usblp->intf);
+
+ if (!usblp->present) /* finish cleanup from disconnect */
usblp_cleanup(usblp);
mutex_unlock(&usblp_mutex);
return 0;
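The usblp_release() rework makes the runtime-PM put unconditional, so it always balances the get taken at open() whether or not the device is still present. The balancing rule in isolation, with an illustrative counter in place of the USB autopm API:

struct pm_ref {
	int count;	/* stand-in for the interface's runtime-PM count */
};

static void pm_put(struct pm_ref *p)
{
	p->count--;
}

static void release_path(struct pm_ref *p, int present)
{
	if (present) {
		/* device still attached: cancel outstanding I/O first */
	}

	pm_put(p);	/* always balance the get taken at open() */

	if (!present) {
		/* device already gone: finish the deferred cleanup */
	}
}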
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index 726100d1ac0d..c946d64142ad 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -139,14 +139,14 @@ static int dwc3_otg_get_irq(struct dwc3 *dwc)
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
int irq;
- irq = platform_get_irq_byname(dwc3_pdev, "otg");
+ irq = platform_get_irq_byname_optional(dwc3_pdev, "otg");
if (irq > 0)
goto out;
if (irq == -EPROBE_DEFER)
goto out;
- irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+ irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
if (irq > 0)
goto out;
@@ -157,9 +157,6 @@ static int dwc3_otg_get_irq(struct dwc3 *dwc)
if (irq > 0)
goto out;
- if (irq != -EPROBE_DEFER)
- dev_err(dwc->dev, "missing OTG IRQ\n");
-
if (!irq)
irq = -EINVAL;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 8adb59f8e4f1..86dc1db788a9 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -3264,14 +3264,14 @@ static int dwc3_gadget_get_irq(struct dwc3 *dwc)
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
int irq;
- irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
+ irq = platform_get_irq_byname_optional(dwc3_pdev, "peripheral");
if (irq > 0)
goto out;
if (irq == -EPROBE_DEFER)
goto out;
- irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+ irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
if (irq > 0)
goto out;
@@ -3282,9 +3282,6 @@ static int dwc3_gadget_get_irq(struct dwc3 *dwc)
if (irq > 0)
goto out;
- if (irq != -EPROBE_DEFER)
- dev_err(dwc->dev, "missing peripheral IRQ\n");
-
if (!irq)
irq = -EINVAL;
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 8deea8c91e03..5567ed2cddbe 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -16,14 +16,14 @@ static int dwc3_host_get_irq(struct dwc3 *dwc)
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
int irq;
- irq = platform_get_irq_byname(dwc3_pdev, "host");
+ irq = platform_get_irq_byname_optional(dwc3_pdev, "host");
if (irq > 0)
goto out;
if (irq == -EPROBE_DEFER)
goto out;
- irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+ irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
if (irq > 0)
goto out;
@@ -34,9 +34,6 @@ static int dwc3_host_get_irq(struct dwc3 *dwc)
if (irq > 0)
goto out;
- if (irq != -EPROBE_DEFER)
- dev_err(dwc->dev, "missing host IRQ\n");
-
if (!irq)
irq = -EINVAL;
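All three dwc3 hunks switch to the _optional lookup so that expected misses stay silent and only the final failure surfaces to the caller. A sketch of the fallback chain follows; the stubbed helper stands in for platform_get_irq_byname_optional(), and EPROBE_DEFER is kernel-private, so it is redefined here for the sketch.

#include <errno.h>
#include <string.h>

#define EPROBE_DEFER 517	/* kernel-private errno, redefined for the sketch */

/* Stub lookup: pretends only the legacy "dwc_usb3" name is wired up. */
static int get_irq_byname_optional(const char *name)
{
	return strcmp(name, "dwc_usb3") ? -ENXIO : 42;
}

static int get_host_irq(void)
{
	int irq;

	irq = get_irq_byname_optional("host");	/* preferred name */
	if (irq > 0 || irq == -EPROBE_DEFER)
		return irq;

	irq = get_irq_byname_optional("dwc_usb3");	/* legacy fallback */
	if (irq > 0)
		return irq;

	return irq ? irq : -EINVAL;	/* quiet: the caller reports failure */
}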
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index d7e611645533..d354036ff6c8 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -45,7 +45,7 @@ config USB_AT91
config USB_LPC32XX
tristate "LPC32XX USB Peripheral Controller"
- depends on ARCH_LPC32XX
+ depends on ARCH_LPC32XX || COMPILE_TEST
depends on I2C
select USB_ISP1301
help
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 8414fac74493..3d499d93c083 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -48,6 +48,7 @@
#define DRIVER_VERSION "02 May 2005"
#define POWER_BUDGET 500 /* in mA; use 8 for low-power port testing */
+#define POWER_BUDGET_3 900 /* in mA */
static const char driver_name[] = "dummy_hcd";
static const char driver_desc[] = "USB Host+Gadget Emulator";
@@ -2432,7 +2433,7 @@ static int dummy_start_ss(struct dummy_hcd *dum_hcd)
dum_hcd->rh_state = DUMMY_RH_RUNNING;
dum_hcd->stream_en_ep = 0;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
- dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET;
+ dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET_3;
dummy_hcd_to_hcd(dum_hcd)->state = HC_STATE_RUNNING;
dummy_hcd_to_hcd(dum_hcd)->uses_new_polling = 1;
#ifdef CONFIG_USB_OTG
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index b3e073fb88c6..2b1f3cc7819b 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -1151,7 +1151,7 @@ static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
u32 *p32, tmp, cbytes;
/* Use optimal data transfer method based on source address and size */
- switch (((u32) data) & 0x3) {
+ switch (((uintptr_t) data) & 0x3) {
case 0: /* 32-bit aligned */
p32 = (u32 *) data;
cbytes = (bytes & ~0x3);
@@ -1252,7 +1252,7 @@ static void udc_stuff_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
u32 *p32, tmp, cbytes;
/* Use optimal data transfer method based on source address and size */
- switch (((u32) data) & 0x3) {
+ switch (((uintptr_t) data) & 0x3) {
case 0: /* 32-bit aligned */
p32 = (u32 *) data;
cbytes = (bytes & ~0x3);
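The two casts above matter on 64-bit builds, where casting a pointer to u32 truncates it; uintptr_t is the portable type for inspecting address bits. A minimal illustration:

#include <stdint.h>

/* Returns the low two bits of the address: 0 means 32-bit aligned,
 * 2 means 16-bit aligned, 1 or 3 means byte-aligned.
 */
static unsigned int addr_alignment(const void *p)
{
	return (unsigned int)((uintptr_t)p & 0x3);
}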
diff --git a/drivers/usb/host/xhci-ext-caps.c b/drivers/usb/host/xhci-ext-caps.c
index f498160df969..3351d07c431f 100644
--- a/drivers/usb/host/xhci-ext-caps.c
+++ b/drivers/usb/host/xhci-ext-caps.c
@@ -57,6 +57,7 @@ static int xhci_create_intel_xhci_sw_pdev(struct xhci_hcd *xhci, u32 cap_offset)
ret = platform_device_add_properties(pdev, role_switch_props);
if (ret) {
dev_err(dev, "failed to register device properties\n");
+ platform_device_put(pdev);
return ret;
}
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9741cdeea9d7..85ceb43e3405 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3202,10 +3202,10 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
if (usb_urb_dir_out(urb)) {
len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
seg->bounce_buf, new_buff_len, enqd_len);
- if (len != seg->bounce_len)
+ if (len != new_buff_len)
xhci_warn(xhci,
"WARN Wrong bounce buffer write length: %zu != %d\n",
- len, seg->bounce_len);
+ len, new_buff_len);
seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
max_pkt, DMA_TO_DEVICE);
} else {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 500865975687..517ec3206f6e 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1032,7 +1032,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
writel(command, &xhci->op_regs->command);
xhci->broken_suspend = 0;
if (xhci_handshake(&xhci->op_regs->status,
- STS_SAVE, 0, 10 * 1000)) {
+ STS_SAVE, 0, 20 * 1000)) {
/*
* AMD SNPS xHC 3.0 occasionally does not clear the
* SSS bit of USBSTS and when driver tries to poll
@@ -1108,6 +1108,18 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
hibernated = true;
if (!hibernated) {
+ /*
+ * Some controllers might lose power during suspend, so wait
+ * for controller not ready bit to clear, just as in xHC init.
+ */
+ retval = xhci_handshake(&xhci->op_regs->status,
+ STS_CNR, 0, 10 * 1000 * 1000);
+ if (retval) {
+ xhci_warn(xhci, "Controller not ready at resume %d\n",
+ retval);
+ spin_unlock_irq(&xhci->lock);
+ return retval;
+ }
/* step 1: restore register */
xhci_restore_registers(xhci);
/* step 2: initialize command ring buffer */
@@ -3083,6 +3095,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
unsigned int ep_index;
unsigned long flags;
u32 ep_flag;
+ int err;
xhci = hcd_to_xhci(hcd);
if (!host_ep->hcpriv)
@@ -3142,7 +3155,17 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
xhci_free_command(xhci, cfg_cmd);
goto cleanup;
}
- xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
+
+ err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
+ ep_index, 0);
+ if (err < 0) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_free_command(xhci, cfg_cmd);
+ xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ",
+ __func__, err);
+ goto cleanup;
+ }
+
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3156,8 +3179,16 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
ctrl_ctx, ep_flag, ep_flag);
xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
- xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
+ err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
udev->slot_id, false);
+ if (err < 0) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_free_command(xhci, cfg_cmd);
+ xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ",
+ __func__, err);
+ goto cleanup;
+ }
+
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -4674,12 +4705,12 @@ static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
desc, state, timeout);
- /* If we found we can't enable hub-initiated LPM, or
+ /* If we found we can't enable hub-initiated LPM, and
* the U1 or U2 exit latency was too high to allow
- * device-initiated LPM as well, just stop searching.
+ * device-initiated LPM as well, then we will disable LPM
+ * for this device, so stop searching any further.
*/
- if (alt_timeout == USB3_LPM_DISABLED ||
- alt_timeout == USB3_LPM_DEVICE_INITIATED) {
+ if (alt_timeout == USB3_LPM_DISABLED) {
*timeout = alt_timeout;
return -E2BIG;
}
@@ -4790,10 +4821,12 @@ static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
if (intf->dev.driver) {
driver = to_usb_driver(intf->dev.driver);
if (driver && driver->disable_hub_initiated_lpm) {
- dev_dbg(&udev->dev, "Hub-initiated %s disabled "
- "at request of driver %s\n",
- state_name, driver->name);
- return xhci_get_timeout_no_hub_lpm(udev, state);
+ dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
+ state_name, driver->name);
+ timeout = xhci_get_timeout_no_hub_lpm(udev,
+ state);
+ if (timeout == USB3_LPM_DISABLED)
+ return timeout;
}
}
@@ -5077,11 +5110,18 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
hcd->has_tt = 1;
} else {
/*
- * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol
- * minor revision instead of sbrn. Minor revision is a two digit
- * BCD containing minor and sub-minor numbers, only show minor.
+ * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
+ * should return 0x31 for sbrn, or that the minor revision
+	 * is a two digit BCD containing minor and sub-minor numbers.
+ * This was later clarified in xHCI 1.2.
+ *
+ * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
+ * minor revision set to 0x1 instead of 0x10.
*/
- minor_rev = xhci->usb3_rhub.min_rev / 0x10;
+ if (xhci->usb3_rhub.min_rev == 0x1)
+ minor_rev = 1;
+ else
+ minor_rev = xhci->usb3_rhub.min_rev / 0x10;
switch (minor_rev) {
case 2:
@@ -5198,8 +5238,16 @@ static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
unsigned int ep_index;
unsigned long flags;
+ /*
+ * udev might be NULL if tt buffer is cleared during a failed device
+	 * enumeration due to a halted control endpoint. The USB core might
+ * have allocated a new udev for the next enumeration attempt.
+ */
+
xhci = hcd_to_xhci(hcd);
udev = (struct usb_device *)ep->hcpriv;
+ if (!udev)
+ return;
slot_id = udev->slot_id;
ep_index = xhci_get_endpoint_index(&ep->desc);
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 0a57c2cc8e5a..7a6b122c833f 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -716,6 +716,10 @@ static int mts_usb_probe(struct usb_interface *intf,
}
+ if (ep_in_current != &ep_in_set[2]) {
+ MTS_WARNING("couldn't find two input bulk endpoints. Bailing out.\n");
+ return -ENODEV;
+ }
if ( ep_out == -1 ) {
MTS_WARNING( "couldn't find an output bulk endpoint. Bailing out.\n" );
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index bdae62b2ffe0..9bce583aada3 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -47,16 +47,6 @@ config USB_SEVSEG
To compile this driver as a module, choose M here: the
module will be called usbsevseg.
-config USB_RIO500
- tristate "USB Diamond Rio500 support"
- help
- Say Y here if you want to connect a USB Rio500 mp3 player to your
- computer's USB port. Please read <file:Documentation/usb/rio.rst>
- for more information.
-
- To compile this driver as a module, choose M here: the
- module will be called rio500.
-
config USB_LEGOTOWER
tristate "USB Lego Infrared Tower support"
help
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 109f54f5b9aa..0d416eb624bb 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -17,7 +17,6 @@ obj-$(CONFIG_USB_ISIGHTFW) += isight_firmware.o
obj-$(CONFIG_USB_LCD) += usblcd.o
obj-$(CONFIG_USB_LD) += ldusb.o
obj-$(CONFIG_USB_LEGOTOWER) += legousbtower.o
-obj-$(CONFIG_USB_RIO500) += rio500.o
obj-$(CONFIG_USB_TEST) += usbtest.o
obj-$(CONFIG_USB_EHSET_TEST_FIXTURE) += ehset.o
obj-$(CONFIG_USB_TRANCEVIBRATOR) += trancevibrator.o
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 344d523b0502..6f5edb9fc61e 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -75,6 +75,7 @@ struct adu_device {
char serial_number[8];
int open_count; /* number of times this port has been opened */
+ unsigned long disconnected:1;
char *read_buffer_primary;
int read_buffer_length;
@@ -116,7 +117,7 @@ static void adu_abort_transfers(struct adu_device *dev)
{
unsigned long flags;
- if (dev->udev == NULL)
+ if (dev->disconnected)
return;
/* shutdown transfer */
@@ -148,6 +149,7 @@ static void adu_delete(struct adu_device *dev)
kfree(dev->read_buffer_secondary);
kfree(dev->interrupt_in_buffer);
kfree(dev->interrupt_out_buffer);
+ usb_put_dev(dev->udev);
kfree(dev);
}
@@ -243,7 +245,7 @@ static int adu_open(struct inode *inode, struct file *file)
}
dev = usb_get_intfdata(interface);
- if (!dev || !dev->udev) {
+ if (!dev) {
retval = -ENODEV;
goto exit_no_device;
}
@@ -326,7 +328,7 @@ static int adu_release(struct inode *inode, struct file *file)
}
adu_release_internal(dev);
- if (dev->udev == NULL) {
+ if (dev->disconnected) {
/* the device was unplugged before the file was released */
if (!dev->open_count) /* ... and we're the last user */
adu_delete(dev);
@@ -354,7 +356,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
return -ERESTARTSYS;
/* verify that the device wasn't unplugged */
- if (dev->udev == NULL) {
+ if (dev->disconnected) {
retval = -ENODEV;
pr_err("No device or device unplugged %d\n", retval);
goto exit;
@@ -518,7 +520,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
goto exit_nolock;
/* verify that the device wasn't unplugged */
- if (dev->udev == NULL) {
+ if (dev->disconnected) {
retval = -ENODEV;
pr_err("No device or device unplugged %d\n", retval);
goto exit;
@@ -663,7 +665,7 @@ static int adu_probe(struct usb_interface *interface,
mutex_init(&dev->mtx);
spin_lock_init(&dev->buflock);
- dev->udev = udev;
+ dev->udev = usb_get_dev(udev);
init_waitqueue_head(&dev->read_wait);
init_waitqueue_head(&dev->write_wait);
@@ -762,14 +764,18 @@ static void adu_disconnect(struct usb_interface *interface)
dev = usb_get_intfdata(interface);
- mutex_lock(&dev->mtx); /* not interruptible */
- dev->udev = NULL; /* poison */
usb_deregister_dev(interface, &adu_class);
- mutex_unlock(&dev->mtx);
+
+ usb_poison_urb(dev->interrupt_in_urb);
+ usb_poison_urb(dev->interrupt_out_urb);
mutex_lock(&adutux_mutex);
usb_set_intfdata(interface, NULL);
+ mutex_lock(&dev->mtx); /* not interruptible */
+ dev->disconnected = 1;
+ mutex_unlock(&dev->mtx);
+
/* if the device is not opened, then we clean up right now */
if (!dev->open_count)
adu_delete(dev);
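The adutux changes adopt a common disconnect discipline: poison in-flight URBs first, mark the device disconnected under a lock, and free the structure only when the last opener has gone. A user-space-runnable sketch of that ordering, with a pthread mutex standing in for the kernel locking:

#include <pthread.h>
#include <stdlib.h>

struct mydev {
	pthread_mutex_t lock;
	int open_count;
	unsigned int disconnected:1;
};

static void mydev_delete(struct mydev *d)
{
	/* the real driver also drops its usb_dev reference here */
	pthread_mutex_destroy(&d->lock);
	free(d);
}

static void mydev_disconnect(struct mydev *d)
{
	/* 1. stop I/O (poison URBs in the real driver) before flagging */

	pthread_mutex_lock(&d->lock);
	d->disconnected = 1;

	if (!d->open_count) {
		/* 2a. nobody has it open: safe to free immediately */
		pthread_mutex_unlock(&d->lock);
		mydev_delete(d);
		return;
	}
	/* 2b. still open: the final release() sees ->disconnected and
	 * performs the delete instead.
	 */
	pthread_mutex_unlock(&d->lock);
}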
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index cf5828ce927a..34e6cd6f40d3 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -98,6 +98,7 @@ static void chaoskey_free(struct chaoskey *dev)
usb_free_urb(dev->urb);
kfree(dev->name);
kfree(dev->buf);
+ usb_put_intf(dev->interface);
kfree(dev);
}
}
@@ -145,6 +146,8 @@ static int chaoskey_probe(struct usb_interface *interface,
if (dev == NULL)
goto out;
+ dev->interface = usb_get_intf(interface);
+
dev->buf = kmalloc(size, GFP_KERNEL);
if (dev->buf == NULL)
@@ -174,8 +177,6 @@ static int chaoskey_probe(struct usb_interface *interface,
goto out;
}
- dev->interface = interface;
-
dev->in_ep = in_ep;
if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID)
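The chaoskey fix pins the interface with usb_get_intf() at probe time and drops that reference in the single free path, so the stored pointer can never outlive its target. The pairing in miniature, with an illustrative refcount rather than the USB core API:

#include <stdlib.h>

struct iface {
	int refs;
};

static struct iface *iface_get(struct iface *i)
{
	i->refs++;
	return i;
}

static void iface_put(struct iface *i)
{
	if (--i->refs == 0)
		free(i);
}

struct mydev {
	struct iface *interface;
};

static void mydev_bind(struct mydev *d, struct iface *i)
{
	d->interface = iface_get(i);	/* take a ref when storing */
}

static void mydev_free(struct mydev *d)
{
	iface_put(d->interface);	/* drop it only where d is freed */
	free(d);
}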
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index f5bed9f29e56..dce44fbf031f 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -54,11 +54,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-/* Module parameters */
-static DEFINE_MUTEX(iowarrior_mutex);
-
static struct usb_driver iowarrior_driver;
-static DEFINE_MUTEX(iowarrior_open_disc_lock);
/*--------------*/
/* data */
@@ -87,6 +83,7 @@ struct iowarrior {
char chip_serial[9]; /* the serial number string of the chip connected */
int report_size; /* number of bytes in a report */
u16 product_id;
+ struct usb_anchor submitted;
};
/*--------------*/
@@ -243,6 +240,7 @@ static inline void iowarrior_delete(struct iowarrior *dev)
kfree(dev->int_in_buffer);
usb_free_urb(dev->int_in_urb);
kfree(dev->read_queue);
+ usb_put_intf(dev->interface);
kfree(dev);
}
@@ -424,11 +422,13 @@ static ssize_t iowarrior_write(struct file *file,
retval = -EFAULT;
goto error;
}
+ usb_anchor_urb(int_out_urb, &dev->submitted);
retval = usb_submit_urb(int_out_urb, GFP_KERNEL);
if (retval) {
dev_dbg(&dev->interface->dev,
"submit error %d for urb nr.%d\n",
retval, atomic_read(&dev->write_busy));
+ usb_unanchor_urb(int_out_urb);
goto error;
}
/* submit was ok */
@@ -477,8 +477,6 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
if (!buffer)
return -ENOMEM;
- /* lock this object */
- mutex_lock(&iowarrior_mutex);
mutex_lock(&dev->mutex);
/* verify that the device wasn't unplugged */
@@ -571,7 +569,6 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
error_out:
/* unlock the device */
mutex_unlock(&dev->mutex);
- mutex_unlock(&iowarrior_mutex);
kfree(buffer);
return retval;
}
@@ -586,27 +583,20 @@ static int iowarrior_open(struct inode *inode, struct file *file)
int subminor;
int retval = 0;
- mutex_lock(&iowarrior_mutex);
subminor = iminor(inode);
interface = usb_find_interface(&iowarrior_driver, subminor);
if (!interface) {
- mutex_unlock(&iowarrior_mutex);
- printk(KERN_ERR "%s - error, can't find device for minor %d\n",
+ pr_err("%s - error, can't find device for minor %d\n",
__func__, subminor);
return -ENODEV;
}
- mutex_lock(&iowarrior_open_disc_lock);
dev = usb_get_intfdata(interface);
- if (!dev) {
- mutex_unlock(&iowarrior_open_disc_lock);
- mutex_unlock(&iowarrior_mutex);
+ if (!dev)
return -ENODEV;
- }
mutex_lock(&dev->mutex);
- mutex_unlock(&iowarrior_open_disc_lock);
/* Only one process can open each device, no sharing. */
if (dev->opened) {
@@ -628,7 +618,6 @@ static int iowarrior_open(struct inode *inode, struct file *file)
out:
mutex_unlock(&dev->mutex);
- mutex_unlock(&iowarrior_mutex);
return retval;
}
@@ -764,11 +753,13 @@ static int iowarrior_probe(struct usb_interface *interface,
init_waitqueue_head(&dev->write_wait);
dev->udev = udev;
- dev->interface = interface;
+ dev->interface = usb_get_intf(interface);
iface_desc = interface->cur_altsetting;
dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
+ init_usb_anchor(&dev->submitted);
+
res = usb_find_last_int_in_endpoint(iface_desc, &dev->int_in_endpoint);
if (res) {
dev_err(&interface->dev, "no interrupt-in endpoint found\n");
@@ -836,7 +827,6 @@ static int iowarrior_probe(struct usb_interface *interface,
if (retval) {
/* something prevented us from registering this driver */
dev_err(&interface->dev, "Not able to get a minor for this device.\n");
- usb_set_intfdata(interface, NULL);
goto error;
}
@@ -860,26 +850,15 @@ error:
*/
static void iowarrior_disconnect(struct usb_interface *interface)
{
- struct iowarrior *dev;
- int minor;
-
- dev = usb_get_intfdata(interface);
- mutex_lock(&iowarrior_open_disc_lock);
- usb_set_intfdata(interface, NULL);
- /* prevent device read, write and ioctl */
- dev->present = 0;
-
- minor = dev->minor;
- mutex_unlock(&iowarrior_open_disc_lock);
- /* give back our minor - this will call close() locks need to be dropped at this point*/
+ struct iowarrior *dev = usb_get_intfdata(interface);
+ int minor = dev->minor;
usb_deregister_dev(interface, &iowarrior_class);
mutex_lock(&dev->mutex);
/* prevent device read, write and ioctl */
-
- mutex_unlock(&dev->mutex);
+ dev->present = 0;
if (dev->opened) {
 /* There is a process that holds a file descriptor to the device;
@@ -887,10 +866,13 @@ static void iowarrior_disconnect(struct usb_interface *interface)
Deleting the device is postponed until close() was called.
*/
usb_kill_urb(dev->int_in_urb);
+ usb_kill_anchored_urbs(&dev->submitted);
wake_up_interruptible(&dev->read_wait);
wake_up_interruptible(&dev->write_wait);
+ mutex_unlock(&dev->mutex);
} else {
/* no process is using the device, cleanup now */
+ mutex_unlock(&dev->mutex);
iowarrior_delete(dev);
}
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 6581774bdfa4..f3108d85e768 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -153,6 +153,7 @@ MODULE_PARM_DESC(min_interrupt_out_interval, "Minimum interrupt out interval in
struct ld_usb {
struct mutex mutex; /* locks this structure */
struct usb_interface *intf; /* save off the usb interface pointer */
+ unsigned long disconnected:1;
int open_count; /* number of times this port has been opened */
@@ -192,12 +193,10 @@ static void ld_usb_abort_transfers(struct ld_usb *dev)
/* shutdown transfer */
if (dev->interrupt_in_running) {
dev->interrupt_in_running = 0;
- if (dev->intf)
- usb_kill_urb(dev->interrupt_in_urb);
+ usb_kill_urb(dev->interrupt_in_urb);
}
if (dev->interrupt_out_busy)
- if (dev->intf)
- usb_kill_urb(dev->interrupt_out_urb);
+ usb_kill_urb(dev->interrupt_out_urb);
}
/**
@@ -205,8 +204,6 @@ static void ld_usb_abort_transfers(struct ld_usb *dev)
*/
static void ld_usb_delete(struct ld_usb *dev)
{
- ld_usb_abort_transfers(dev);
-
/* free data structures */
usb_free_urb(dev->interrupt_in_urb);
usb_free_urb(dev->interrupt_out_urb);
@@ -263,7 +260,7 @@ static void ld_usb_interrupt_in_callback(struct urb *urb)
resubmit:
/* resubmit if we're still running */
- if (dev->interrupt_in_running && !dev->buffer_overflow && dev->intf) {
+ if (dev->interrupt_in_running && !dev->buffer_overflow) {
retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC);
if (retval) {
dev_err(&dev->intf->dev,
@@ -392,7 +389,7 @@ static int ld_usb_release(struct inode *inode, struct file *file)
retval = -ENODEV;
goto unlock_exit;
}
- if (dev->intf == NULL) {
+ if (dev->disconnected) {
/* the device was unplugged before the file was released */
mutex_unlock(&dev->mutex);
/* unlock here as ld_usb_delete frees dev */
@@ -423,7 +420,7 @@ static __poll_t ld_usb_poll(struct file *file, poll_table *wait)
dev = file->private_data;
- if (!dev->intf)
+ if (dev->disconnected)
return EPOLLERR | EPOLLHUP;
poll_wait(file, &dev->read_wait, wait);
@@ -462,7 +459,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
}
/* verify that the device wasn't unplugged */
- if (dev->intf == NULL) {
+ if (dev->disconnected) {
retval = -ENODEV;
printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval);
goto unlock_exit;
@@ -542,7 +539,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
}
/* verify that the device wasn't unplugged */
- if (dev->intf == NULL) {
+ if (dev->disconnected) {
retval = -ENODEV;
printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval);
goto unlock_exit;
@@ -764,6 +761,9 @@ static void ld_usb_disconnect(struct usb_interface *intf)
/* give back our minor */
usb_deregister_dev(intf, &ld_usb_class);
+ usb_poison_urb(dev->interrupt_in_urb);
+ usb_poison_urb(dev->interrupt_out_urb);
+
mutex_lock(&dev->mutex);
/* if the device is not opened, then we clean up right now */
@@ -771,7 +771,7 @@ static void ld_usb_disconnect(struct usb_interface *intf)
mutex_unlock(&dev->mutex);
ld_usb_delete(dev);
} else {
- dev->intf = NULL;
+ dev->disconnected = 1;
/* wake up pollers */
wake_up_interruptible_all(&dev->read_wait);
wake_up_interruptible_all(&dev->write_wait);
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 006cf13b2199..9d4c52a7ebe0 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -179,7 +179,6 @@ static const struct usb_device_id tower_table[] = {
};
MODULE_DEVICE_TABLE (usb, tower_table);
-static DEFINE_MUTEX(open_disc_mutex);
#define LEGO_USB_TOWER_MINOR_BASE 160
@@ -191,6 +190,7 @@ struct lego_usb_tower {
unsigned char minor; /* the starting minor number for this device */
int open_count; /* number of times this port has been opened */
+ unsigned long disconnected:1;
char* read_buffer;
size_t read_buffer_length; /* this much came in */
@@ -290,14 +290,13 @@ static inline void lego_usb_tower_debug_data(struct device *dev,
*/
static inline void tower_delete (struct lego_usb_tower *dev)
{
- tower_abort_transfers (dev);
-
/* free data structures */
usb_free_urb(dev->interrupt_in_urb);
usb_free_urb(dev->interrupt_out_urb);
kfree (dev->read_buffer);
kfree (dev->interrupt_in_buffer);
kfree (dev->interrupt_out_buffer);
+ usb_put_dev(dev->udev);
kfree (dev);
}
@@ -332,18 +331,14 @@ static int tower_open (struct inode *inode, struct file *file)
goto exit;
}
- mutex_lock(&open_disc_mutex);
dev = usb_get_intfdata(interface);
-
if (!dev) {
- mutex_unlock(&open_disc_mutex);
retval = -ENODEV;
goto exit;
}
/* lock this device */
if (mutex_lock_interruptible(&dev->lock)) {
- mutex_unlock(&open_disc_mutex);
retval = -ERESTARTSYS;
goto exit;
}
@@ -351,12 +346,9 @@ static int tower_open (struct inode *inode, struct file *file)
/* allow opening only once */
if (dev->open_count) {
- mutex_unlock(&open_disc_mutex);
retval = -EBUSY;
goto unlock_exit;
}
- dev->open_count = 1;
- mutex_unlock(&open_disc_mutex);
/* reset the tower */
result = usb_control_msg (dev->udev,
@@ -396,13 +388,14 @@ static int tower_open (struct inode *inode, struct file *file)
dev_err(&dev->udev->dev,
"Couldn't submit interrupt_in_urb %d\n", retval);
dev->interrupt_in_running = 0;
- dev->open_count = 0;
goto unlock_exit;
}
/* save device in the file's private structure */
file->private_data = dev;
+ dev->open_count = 1;
+
unlock_exit:
mutex_unlock(&dev->lock);
@@ -423,10 +416,9 @@ static int tower_release (struct inode *inode, struct file *file)
if (dev == NULL) {
retval = -ENODEV;
- goto exit_nolock;
+ goto exit;
}
- mutex_lock(&open_disc_mutex);
if (mutex_lock_interruptible(&dev->lock)) {
retval = -ERESTARTSYS;
goto exit;
@@ -438,7 +430,8 @@ static int tower_release (struct inode *inode, struct file *file)
retval = -ENODEV;
goto unlock_exit;
}
- if (dev->udev == NULL) {
+
+ if (dev->disconnected) {
/* the device was unplugged before the file was released */
/* unlock here as tower_delete frees dev */
@@ -456,10 +449,7 @@ static int tower_release (struct inode *inode, struct file *file)
unlock_exit:
mutex_unlock(&dev->lock);
-
exit:
- mutex_unlock(&open_disc_mutex);
-exit_nolock:
return retval;
}
@@ -477,10 +467,9 @@ static void tower_abort_transfers (struct lego_usb_tower *dev)
if (dev->interrupt_in_running) {
dev->interrupt_in_running = 0;
mb();
- if (dev->udev)
- usb_kill_urb (dev->interrupt_in_urb);
+ usb_kill_urb(dev->interrupt_in_urb);
}
- if (dev->interrupt_out_busy && dev->udev)
+ if (dev->interrupt_out_busy)
usb_kill_urb(dev->interrupt_out_urb);
}
@@ -516,7 +505,7 @@ static __poll_t tower_poll (struct file *file, poll_table *wait)
dev = file->private_data;
- if (!dev->udev)
+ if (dev->disconnected)
return EPOLLERR | EPOLLHUP;
poll_wait(file, &dev->read_wait, wait);
@@ -563,7 +552,7 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
}
/* verify that the device wasn't unplugged */
- if (dev->udev == NULL) {
+ if (dev->disconnected) {
retval = -ENODEV;
pr_err("No device or device unplugged %d\n", retval);
goto unlock_exit;
@@ -649,7 +638,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
}
/* verify that the device wasn't unplugged */
- if (dev->udev == NULL) {
+ if (dev->disconnected) {
retval = -ENODEV;
pr_err("No device or device unplugged %d\n", retval);
goto unlock_exit;
@@ -759,7 +748,7 @@ static void tower_interrupt_in_callback (struct urb *urb)
resubmit:
/* resubmit if we're still running */
- if (dev->interrupt_in_running && dev->udev) {
+ if (dev->interrupt_in_running) {
retval = usb_submit_urb (dev->interrupt_in_urb, GFP_ATOMIC);
if (retval)
dev_err(&dev->udev->dev,
@@ -822,8 +811,9 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
mutex_init(&dev->lock);
- dev->udev = udev;
+ dev->udev = usb_get_dev(udev);
dev->open_count = 0;
+ dev->disconnected = 0;
dev->read_buffer = NULL;
dev->read_buffer_length = 0;
@@ -891,8 +881,10 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
get_version_reply,
sizeof(*get_version_reply),
1000);
- if (result < 0) {
- dev_err(idev, "LEGO USB Tower get version control request failed\n");
+ if (result < sizeof(*get_version_reply)) {
+ if (result >= 0)
+ result = -EIO;
+ dev_err(idev, "get version request failed: %d\n", result);
retval = result;
goto error;
}
@@ -910,7 +902,6 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
if (retval) {
/* something prevented us from registering this driver */
dev_err(idev, "Not able to get a minor for this device.\n");
- usb_set_intfdata (interface, NULL);
goto error;
}
dev->minor = interface->minor;
@@ -942,23 +933,24 @@ static void tower_disconnect (struct usb_interface *interface)
int minor;
dev = usb_get_intfdata (interface);
- mutex_lock(&open_disc_mutex);
- usb_set_intfdata (interface, NULL);
minor = dev->minor;
- /* give back our minor */
+ /* give back our minor and prevent further open() */
usb_deregister_dev (interface, &tower_class);
+ /* stop I/O */
+ usb_poison_urb(dev->interrupt_in_urb);
+ usb_poison_urb(dev->interrupt_out_urb);
+
mutex_lock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
/* if the device is not opened, then we clean up right now */
if (!dev->open_count) {
mutex_unlock(&dev->lock);
tower_delete (dev);
} else {
- dev->udev = NULL;
+ dev->disconnected = 1;
/* wake up pollers */
wake_up_interruptible_all(&dev->read_wait);
wake_up_interruptible_all(&dev->write_wait);
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
deleted file mode 100644
index 30cae5e1954d..000000000000
--- a/drivers/usb/misc/rio500.c
+++ /dev/null
@@ -1,554 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/* -*- linux-c -*- */
-
-/*
- * Driver for USB Rio 500
- *
- * Cesar Miquel (miquel@df.uba.ar)
- *
- * based on hp_scanner.c by David E. Nelson (dnelson@jump.net)
- *
- * Based upon mouse.c (Brad Keryan) and printer.c (Michael Gee).
- *
- * Changelog:
- * 30/05/2003 replaced lock/unlock kernel with up/down
- * Daniele Bellucci bellucda@tiscali.it
- * */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched/signal.h>
-#include <linux/mutex.h>
-#include <linux/errno.h>
-#include <linux/random.h>
-#include <linux/poll.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/usb.h>
-#include <linux/wait.h>
-
-#include "rio500_usb.h"
-
-#define DRIVER_AUTHOR "Cesar Miquel <miquel@df.uba.ar>"
-#define DRIVER_DESC "USB Rio 500 driver"
-
-#define RIO_MINOR 64
-
-/* stall/wait timeout for rio */
-#define NAK_TIMEOUT (HZ)
-
-#define IBUF_SIZE 0x1000
-
-/* Size of the rio buffer */
-#define OBUF_SIZE 0x10000
-
-struct rio_usb_data {
- struct usb_device *rio_dev; /* init: probe_rio */
- unsigned int ifnum; /* Interface number of the USB device */
- int isopen; /* nz if open */
- int present; /* Device is present on the bus */
- char *obuf, *ibuf; /* transfer buffers */
- char bulk_in_ep, bulk_out_ep; /* Endpoint assignments */
- wait_queue_head_t wait_q; /* for timeouts */
-};
-
-static DEFINE_MUTEX(rio500_mutex);
-static struct rio_usb_data rio_instance;
-
-static int open_rio(struct inode *inode, struct file *file)
-{
- struct rio_usb_data *rio = &rio_instance;
-
- /* against disconnect() */
- mutex_lock(&rio500_mutex);
-
- if (rio->isopen || !rio->present) {
- mutex_unlock(&rio500_mutex);
- return -EBUSY;
- }
- rio->isopen = 1;
-
- init_waitqueue_head(&rio->wait_q);
-
-
- dev_info(&rio->rio_dev->dev, "Rio opened.\n");
- mutex_unlock(&rio500_mutex);
-
- return 0;
-}
-
-static int close_rio(struct inode *inode, struct file *file)
-{
- struct rio_usb_data *rio = &rio_instance;
-
- /* against disconnect() */
- mutex_lock(&rio500_mutex);
-
- rio->isopen = 0;
- if (!rio->present) {
- /* cleanup has been delayed */
- kfree(rio->ibuf);
- kfree(rio->obuf);
- rio->ibuf = NULL;
- rio->obuf = NULL;
- } else {
- dev_info(&rio->rio_dev->dev, "Rio closed.\n");
- }
- mutex_unlock(&rio500_mutex);
- return 0;
-}
-
-static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct RioCommand rio_cmd;
- struct rio_usb_data *rio = &rio_instance;
- void __user *data;
- unsigned char *buffer;
- int result, requesttype;
- int retries;
- int retval=0;
-
- mutex_lock(&rio500_mutex);
- /* Sanity check to make sure rio is connected, powered, etc */
- if (rio->present == 0 || rio->rio_dev == NULL) {
- retval = -ENODEV;
- goto err_out;
- }
-
- switch (cmd) {
- case RIO_RECV_COMMAND:
- data = (void __user *) arg;
- if (data == NULL)
- break;
- if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) {
- retval = -EFAULT;
- goto err_out;
- }
- if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) {
- retval = -EINVAL;
- goto err_out;
- }
- buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
- if (buffer == NULL) {
- retval = -ENOMEM;
- goto err_out;
- }
- if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) {
- retval = -EFAULT;
- free_page((unsigned long) buffer);
- goto err_out;
- }
-
- requesttype = rio_cmd.requesttype | USB_DIR_IN |
- USB_TYPE_VENDOR | USB_RECIP_DEVICE;
- dev_dbg(&rio->rio_dev->dev,
- "sending command:reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n",
- requesttype, rio_cmd.request, rio_cmd.value,
- rio_cmd.index, rio_cmd.length);
- /* Send rio control message */
- retries = 3;
- while (retries) {
- result = usb_control_msg(rio->rio_dev,
- usb_rcvctrlpipe(rio-> rio_dev, 0),
- rio_cmd.request,
- requesttype,
- rio_cmd.value,
- rio_cmd.index, buffer,
- rio_cmd.length,
- jiffies_to_msecs(rio_cmd.timeout));
- if (result == -ETIMEDOUT)
- retries--;
- else if (result < 0) {
- dev_err(&rio->rio_dev->dev,
- "Error executing ioctrl. code = %d\n",
- result);
- retries = 0;
- } else {
- dev_dbg(&rio->rio_dev->dev,
- "Executed ioctl. Result = %d (data=%02x)\n",
- result, buffer[0]);
- if (copy_to_user(rio_cmd.buffer, buffer,
- rio_cmd.length)) {
- free_page((unsigned long) buffer);
- retval = -EFAULT;
- goto err_out;
- }
- retries = 0;
- }
-
- /* rio_cmd.buffer contains a raw stream of single byte
- data which has been returned from rio. Data is
- interpreted at application level. For data that
- will be cast to data types longer than 1 byte, data
- will be little_endian and will potentially need to
- be swapped at the app level */
-
- }
- free_page((unsigned long) buffer);
- break;
-
- case RIO_SEND_COMMAND:
- data = (void __user *) arg;
- if (data == NULL)
- break;
- if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) {
- retval = -EFAULT;
- goto err_out;
- }
- if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) {
- retval = -EINVAL;
- goto err_out;
- }
- buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
- if (buffer == NULL) {
- retval = -ENOMEM;
- goto err_out;
- }
- if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) {
- free_page((unsigned long)buffer);
- retval = -EFAULT;
- goto err_out;
- }
-
- requesttype = rio_cmd.requesttype | USB_DIR_OUT |
- USB_TYPE_VENDOR | USB_RECIP_DEVICE;
- dev_dbg(&rio->rio_dev->dev,
- "sending command: reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n",
- requesttype, rio_cmd.request, rio_cmd.value,
- rio_cmd.index, rio_cmd.length);
- /* Send rio control message */
- retries = 3;
- while (retries) {
- result = usb_control_msg(rio->rio_dev,
- usb_sndctrlpipe(rio-> rio_dev, 0),
- rio_cmd.request,
- requesttype,
- rio_cmd.value,
- rio_cmd.index, buffer,
- rio_cmd.length,
- jiffies_to_msecs(rio_cmd.timeout));
- if (result == -ETIMEDOUT)
- retries--;
- else if (result < 0) {
- dev_err(&rio->rio_dev->dev,
- "Error executing ioctrl. code = %d\n",
- result);
- retries = 0;
- } else {
- dev_dbg(&rio->rio_dev->dev,
- "Executed ioctl. Result = %d\n", result);
- retries = 0;
-
- }
-
- }
- free_page((unsigned long) buffer);
- break;
-
- default:
- retval = -ENOTTY;
- break;
- }
-
-
-err_out:
- mutex_unlock(&rio500_mutex);
- return retval;
-}
-
-static ssize_t
-write_rio(struct file *file, const char __user *buffer,
- size_t count, loff_t * ppos)
-{
- DEFINE_WAIT(wait);
- struct rio_usb_data *rio = &rio_instance;
-
- unsigned long copy_size;
- unsigned long bytes_written = 0;
- unsigned int partial;
-
- int result = 0;
- int maxretry;
- int errn = 0;
- int intr;
-
- intr = mutex_lock_interruptible(&rio500_mutex);
- if (intr)
- return -EINTR;
- /* Sanity check to make sure rio is connected, powered, etc */
- if (rio->present == 0 || rio->rio_dev == NULL) {
- mutex_unlock(&rio500_mutex);
- return -ENODEV;
- }
-
-
-
- do {
- unsigned long thistime;
- char *obuf = rio->obuf;
-
- thistime = copy_size =
- (count >= OBUF_SIZE) ? OBUF_SIZE : count;
- if (copy_from_user(rio->obuf, buffer, copy_size)) {
- errn = -EFAULT;
- goto error;
- }
- maxretry = 5;
- while (thistime) {
- if (!rio->rio_dev) {
- errn = -ENODEV;
- goto error;
- }
- if (signal_pending(current)) {
- mutex_unlock(&rio500_mutex);
- return bytes_written ? bytes_written : -EINTR;
- }
-
- result = usb_bulk_msg(rio->rio_dev,
- usb_sndbulkpipe(rio->rio_dev, 2),
- obuf, thistime, &partial, 5000);
-
- dev_dbg(&rio->rio_dev->dev,
- "write stats: result:%d thistime:%lu partial:%u\n",
- result, thistime, partial);
-
- if (result == -ETIMEDOUT) { /* NAK - so hold for a while */
- if (!maxretry--) {
- errn = -ETIME;
- goto error;
- }
- prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE);
- schedule_timeout(NAK_TIMEOUT);
- finish_wait(&rio->wait_q, &wait);
- continue;
- } else if (!result && partial) {
- obuf += partial;
- thistime -= partial;
- } else
- break;
- }
- if (result) {
- dev_err(&rio->rio_dev->dev, "Write Whoops - %x\n",
- result);
- errn = -EIO;
- goto error;
- }
- bytes_written += copy_size;
- count -= copy_size;
- buffer += copy_size;
- } while (count > 0);
-
- mutex_unlock(&rio500_mutex);
-
- return bytes_written ? bytes_written : -EIO;
-
-error:
- mutex_unlock(&rio500_mutex);
- return errn;
-}
-
-static ssize_t
-read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
-{
- DEFINE_WAIT(wait);
- struct rio_usb_data *rio = &rio_instance;
- ssize_t read_count;
- unsigned int partial;
- int this_read;
- int result;
- int maxretry = 10;
- char *ibuf;
- int intr;
-
- intr = mutex_lock_interruptible(&rio500_mutex);
- if (intr)
- return -EINTR;
- /* Sanity check to make sure rio is connected, powered, etc */
- if (rio->present == 0 || rio->rio_dev == NULL) {
- mutex_unlock(&rio500_mutex);
- return -ENODEV;
- }
-
- ibuf = rio->ibuf;
-
- read_count = 0;
-
-
- while (count > 0) {
- if (signal_pending(current)) {
- mutex_unlock(&rio500_mutex);
- return read_count ? read_count : -EINTR;
- }
- if (!rio->rio_dev) {
- mutex_unlock(&rio500_mutex);
- return -ENODEV;
- }
- this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count;
-
- result = usb_bulk_msg(rio->rio_dev,
- usb_rcvbulkpipe(rio->rio_dev, 1),
- ibuf, this_read, &partial,
- 8000);
-
- dev_dbg(&rio->rio_dev->dev,
- "read stats: result:%d this_read:%u partial:%u\n",
- result, this_read, partial);
-
- if (partial) {
- count = this_read = partial;
- } else if (result == -ETIMEDOUT || result == 15) { /* FIXME: 15 ??? */
- if (!maxretry--) {
- mutex_unlock(&rio500_mutex);
- dev_err(&rio->rio_dev->dev,
- "read_rio: maxretry timeout\n");
- return -ETIME;
- }
- prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE);
- schedule_timeout(NAK_TIMEOUT);
- finish_wait(&rio->wait_q, &wait);
- continue;
- } else if (result != -EREMOTEIO) {
- mutex_unlock(&rio500_mutex);
- dev_err(&rio->rio_dev->dev,
- "Read Whoops - result:%d partial:%u this_read:%u\n",
- result, partial, this_read);
- return -EIO;
- } else {
- mutex_unlock(&rio500_mutex);
- return (0);
- }
-
- if (this_read) {
- if (copy_to_user(buffer, ibuf, this_read)) {
- mutex_unlock(&rio500_mutex);
- return -EFAULT;
- }
- count -= this_read;
- read_count += this_read;
- buffer += this_read;
- }
- }
- mutex_unlock(&rio500_mutex);
- return read_count;
-}
-
-static const struct file_operations usb_rio_fops = {
- .owner = THIS_MODULE,
- .read = read_rio,
- .write = write_rio,
- .unlocked_ioctl = ioctl_rio,
- .open = open_rio,
- .release = close_rio,
- .llseek = noop_llseek,
-};
-
-static struct usb_class_driver usb_rio_class = {
- .name = "rio500%d",
- .fops = &usb_rio_fops,
- .minor_base = RIO_MINOR,
-};
-
-static int probe_rio(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct usb_device *dev = interface_to_usbdev(intf);
- struct rio_usb_data *rio = &rio_instance;
- int retval = -ENOMEM;
- char *ibuf, *obuf;
-
- if (rio->present) {
- dev_info(&intf->dev, "Second USB Rio at address %d refused\n", dev->devnum);
- return -EBUSY;
- }
- dev_info(&intf->dev, "USB Rio found at address %d\n", dev->devnum);
-
- obuf = kmalloc(OBUF_SIZE, GFP_KERNEL);
- if (!obuf) {
- dev_err(&dev->dev,
- "probe_rio: Not enough memory for the output buffer\n");
- goto err_obuf;
- }
- dev_dbg(&intf->dev, "obuf address: %p\n", obuf);
-
- ibuf = kmalloc(IBUF_SIZE, GFP_KERNEL);
- if (!ibuf) {
- dev_err(&dev->dev,
- "probe_rio: Not enough memory for the input buffer\n");
- goto err_ibuf;
- }
- dev_dbg(&intf->dev, "ibuf address: %p\n", ibuf);
-
- mutex_lock(&rio500_mutex);
- rio->rio_dev = dev;
- rio->ibuf = ibuf;
- rio->obuf = obuf;
- rio->present = 1;
- mutex_unlock(&rio500_mutex);
-
- retval = usb_register_dev(intf, &usb_rio_class);
- if (retval) {
- dev_err(&dev->dev,
- "Not able to get a minor for this device.\n");
- goto err_register;
- }
-
- usb_set_intfdata(intf, rio);
- return retval;
-
- err_register:
- mutex_lock(&rio500_mutex);
- rio->present = 0;
- mutex_unlock(&rio500_mutex);
- err_ibuf:
- kfree(obuf);
- err_obuf:
- return retval;
-}
-
-static void disconnect_rio(struct usb_interface *intf)
-{
- struct rio_usb_data *rio = usb_get_intfdata (intf);
-
- usb_set_intfdata (intf, NULL);
- if (rio) {
- usb_deregister_dev(intf, &usb_rio_class);
-
- mutex_lock(&rio500_mutex);
- if (rio->isopen) {
- rio->isopen = 0;
- /* better let it finish - the release will do whats needed */
- rio->rio_dev = NULL;
- mutex_unlock(&rio500_mutex);
- return;
- }
- kfree(rio->ibuf);
- kfree(rio->obuf);
-
- dev_info(&intf->dev, "USB Rio disconnected.\n");
-
- rio->present = 0;
- mutex_unlock(&rio500_mutex);
- }
-}
-
-static const struct usb_device_id rio_table[] = {
- { USB_DEVICE(0x0841, 1) }, /* Rio 500 */
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE (usb, rio_table);
-
-static struct usb_driver rio_driver = {
- .name = "rio500",
- .probe = probe_rio,
- .disconnect = disconnect_rio,
- .id_table = rio_table,
-};
-
-module_usb_driver(rio_driver);
-
-MODULE_AUTHOR( DRIVER_AUTHOR );
-MODULE_DESCRIPTION( DRIVER_DESC );
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/usb/misc/rio500_usb.h b/drivers/usb/misc/rio500_usb.h
deleted file mode 100644
index 6db7a5863496..000000000000
--- a/drivers/usb/misc/rio500_usb.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/* ----------------------------------------------------------------------
- Copyright (C) 2000 Cesar Miquel (miquel@df.uba.ar)
- ---------------------------------------------------------------------- */
-
-#define RIO_SEND_COMMAND 0x1
-#define RIO_RECV_COMMAND 0x2
-
-#define RIO_DIR_OUT 0x0
-#define RIO_DIR_IN 0x1
-
-struct RioCommand {
- short length;
- int request;
- int requesttype;
- int value;
- int index;
- void __user *buffer;
- int timeout;
-};
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 9ba4a4e68d91..61e9e987fe4a 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/mutex.h>
+#include <linux/rwsem.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
@@ -29,16 +30,12 @@
#define IOCTL_GET_DRV_VERSION 2
-static DEFINE_MUTEX(lcd_mutex);
static const struct usb_device_id id_table[] = {
{ .idVendor = 0x10D2, .match_flags = USB_DEVICE_ID_MATCH_VENDOR, },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
-static DEFINE_MUTEX(open_disc_mutex);
-
-
struct usb_lcd {
struct usb_device *udev; /* init: probe_lcd */
struct usb_interface *interface; /* the interface for
@@ -57,6 +54,8 @@ struct usb_lcd {
using up all RAM */
struct usb_anchor submitted; /* URBs to wait for
before suspend */
+ struct rw_semaphore io_rwsem;
+ unsigned long disconnected:1;
};
#define to_lcd_dev(d) container_of(d, struct usb_lcd, kref)
@@ -81,40 +80,29 @@ static int lcd_open(struct inode *inode, struct file *file)
struct usb_interface *interface;
int subminor, r;
- mutex_lock(&lcd_mutex);
subminor = iminor(inode);
interface = usb_find_interface(&lcd_driver, subminor);
if (!interface) {
- mutex_unlock(&lcd_mutex);
- printk(KERN_ERR "USBLCD: %s - error, can't find device for minor %d\n",
+ pr_err("USBLCD: %s - error, can't find device for minor %d\n",
__func__, subminor);
return -ENODEV;
}
- mutex_lock(&open_disc_mutex);
dev = usb_get_intfdata(interface);
- if (!dev) {
- mutex_unlock(&open_disc_mutex);
- mutex_unlock(&lcd_mutex);
- return -ENODEV;
- }
/* increment our usage count for the device */
kref_get(&dev->kref);
- mutex_unlock(&open_disc_mutex);
/* grab a power reference */
r = usb_autopm_get_interface(interface);
if (r < 0) {
kref_put(&dev->kref, lcd_delete);
- mutex_unlock(&lcd_mutex);
return r;
}
/* save our object in the file's private structure */
file->private_data = dev;
- mutex_unlock(&lcd_mutex);
return 0;
}
@@ -142,6 +130,13 @@ static ssize_t lcd_read(struct file *file, char __user * buffer,
dev = file->private_data;
+ down_read(&dev->io_rwsem);
+
+ if (dev->disconnected) {
+ retval = -ENODEV;
+ goto out_up_io;
+ }
+
/* do a blocking bulk read to get data from the device */
retval = usb_bulk_msg(dev->udev,
usb_rcvbulkpipe(dev->udev,
@@ -158,6 +153,9 @@ static ssize_t lcd_read(struct file *file, char __user * buffer,
retval = bytes_read;
}
+out_up_io:
+ up_read(&dev->io_rwsem);
+
return retval;
}
@@ -173,14 +171,12 @@ static long lcd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (cmd) {
case IOCTL_GET_HARD_VERSION:
- mutex_lock(&lcd_mutex);
bcdDevice = le16_to_cpu((dev->udev)->descriptor.bcdDevice);
sprintf(buf, "%1d%1d.%1d%1d",
(bcdDevice & 0xF000)>>12,
(bcdDevice & 0xF00)>>8,
(bcdDevice & 0xF0)>>4,
(bcdDevice & 0xF));
- mutex_unlock(&lcd_mutex);
if (copy_to_user((void __user *)arg, buf, strlen(buf)) != 0)
return -EFAULT;
break;
@@ -237,11 +233,18 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
if (r < 0)
return -EINTR;
+ down_read(&dev->io_rwsem);
+
+ if (dev->disconnected) {
+ retval = -ENODEV;
+ goto err_up_io;
+ }
+
/* create a urb, and a buffer for it, and copy the data to the urb */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
retval = -ENOMEM;
- goto err_no_buf;
+ goto err_up_io;
}
buf = usb_alloc_coherent(dev->udev, count, GFP_KERNEL,
@@ -278,6 +281,7 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
the USB core will eventually free it entirely */
usb_free_urb(urb);
+ up_read(&dev->io_rwsem);
exit:
return count;
error_unanchor:
@@ -285,7 +289,8 @@ error_unanchor:
error:
usb_free_coherent(dev->udev, count, buf, urb->transfer_dma);
usb_free_urb(urb);
-err_no_buf:
+err_up_io:
+ up_read(&dev->io_rwsem);
up(&dev->limit_sem);
return retval;
}
@@ -325,6 +330,7 @@ static int lcd_probe(struct usb_interface *interface,
kref_init(&dev->kref);
sema_init(&dev->limit_sem, USB_LCD_CONCURRENT_WRITES);
+ init_rwsem(&dev->io_rwsem);
init_usb_anchor(&dev->submitted);
dev->udev = usb_get_dev(interface_to_usbdev(interface));
@@ -365,7 +371,6 @@ static int lcd_probe(struct usb_interface *interface,
/* something prevented us from registering this driver */
dev_err(&interface->dev,
"Not able to get a minor for this device.\n");
- usb_set_intfdata(interface, NULL);
goto error;
}
@@ -411,17 +416,18 @@ static int lcd_resume(struct usb_interface *intf)
static void lcd_disconnect(struct usb_interface *interface)
{
- struct usb_lcd *dev;
+ struct usb_lcd *dev = usb_get_intfdata(interface);
int minor = interface->minor;
- mutex_lock(&open_disc_mutex);
- dev = usb_get_intfdata(interface);
- usb_set_intfdata(interface, NULL);
- mutex_unlock(&open_disc_mutex);
-
/* give back our minor */
usb_deregister_dev(interface, &lcd_class);
+ down_write(&dev->io_rwsem);
+ dev->disconnected = 1;
+ up_write(&dev->io_rwsem);
+
+ usb_kill_anchored_urbs(&dev->submitted);
+
/* decrement our usage count */
kref_put(&dev->kref, lcd_delete);
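
The usblcd change above drops the global mutexes in favour of a per-device rw_semaphore plus a `disconnected` flag: I/O paths take the semaphore shared (they may run concurrently), while disconnect takes it exclusive to drain in-flight I/O before flagging the device. Below is a minimal userspace sketch of that pattern using POSIX rwlocks; the struct and function names are illustrative and -19 stands in for -ENODEV.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	pthread_rwlock_t io_rwlock;	/* stands in for the rw_semaphore */
	bool disconnected;
};

static struct dev_state dev = {
	.io_rwlock = PTHREAD_RWLOCK_INITIALIZER,
};

/* I/O paths take the lock shared: they can run concurrently, and the
 * device is guaranteed to stay around while any of them holds it. */
static int dev_read(void)
{
	int ret = 0;

	pthread_rwlock_rdlock(&dev.io_rwlock);
	if (dev.disconnected)
		ret = -19;		/* -ENODEV in the driver */
	/* ... otherwise perform the transfer here ... */
	pthread_rwlock_unlock(&dev.io_rwlock);
	return ret;
}

/* Teardown takes the lock exclusive: it waits for in-flight I/O to
 * drain, then flags the device so later I/O fails fast. */
static void dev_disconnect(void)
{
	pthread_rwlock_wrlock(&dev.io_rwlock);
	dev.disconnected = true;
	pthread_rwlock_unlock(&dev.io_rwlock);
}

int main(void)
{
	printf("before: %d\n", dev_read());	/* 0 */
	dev_disconnect();
	printf("after:  %d\n", dev_read());	/* -19 */
	return 0;
}
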
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 6715a128e6c8..be0505b8b5d4 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -60,6 +60,7 @@ struct usb_yurex {
struct kref kref;
struct mutex io_mutex;
+ unsigned long disconnected:1;
struct fasync_struct *async_queue;
wait_queue_head_t waitq;
@@ -107,6 +108,7 @@ static void yurex_delete(struct kref *kref)
dev->int_buffer, dev->urb->transfer_dma);
usb_free_urb(dev->urb);
}
+ usb_put_intf(dev->interface);
usb_put_dev(dev->udev);
kfree(dev);
}
@@ -132,6 +134,7 @@ static void yurex_interrupt(struct urb *urb)
switch (status) {
case 0: /*success*/
break;
+ /* The device is terminated or messed up, give up */
case -EOVERFLOW:
dev_err(&dev->interface->dev,
"%s - overflow with length %d, actual length is %d\n",
@@ -140,12 +143,13 @@ static void yurex_interrupt(struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
case -EILSEQ:
- /* The device is terminated, clean up */
+ case -EPROTO:
+ case -ETIME:
return;
default:
dev_err(&dev->interface->dev,
"%s - unknown status received: %d\n", __func__, status);
- goto exit;
+ return;
}
/* handle received message */
@@ -177,7 +181,6 @@ static void yurex_interrupt(struct urb *urb)
break;
}
-exit:
retval = usb_submit_urb(dev->urb, GFP_ATOMIC);
if (retval) {
dev_err(&dev->interface->dev, "%s - usb_submit_urb failed: %d\n",
@@ -204,7 +207,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
init_waitqueue_head(&dev->waitq);
dev->udev = usb_get_dev(interface_to_usbdev(interface));
- dev->interface = interface;
+ dev->interface = usb_get_intf(interface);
/* set up the endpoint information */
iface_desc = interface->cur_altsetting;
@@ -315,8 +318,9 @@ static void yurex_disconnect(struct usb_interface *interface)
/* prevent more I/O from starting */
usb_poison_urb(dev->urb);
+ usb_poison_urb(dev->cntl_urb);
mutex_lock(&dev->io_mutex);
- dev->interface = NULL;
+ dev->disconnected = 1;
mutex_unlock(&dev->io_mutex);
/* wakeup waiters */
@@ -404,7 +408,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
dev = file->private_data;
mutex_lock(&dev->io_mutex);
- if (!dev->interface) { /* already disconnected */
+ if (dev->disconnected) { /* already disconnected */
mutex_unlock(&dev->io_mutex);
return -ENODEV;
}
@@ -439,7 +443,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
goto error;
mutex_lock(&dev->io_mutex);
- if (!dev->interface) { /* already disconnected */
+ if (dev->disconnected) { /* already disconnected */
mutex_unlock(&dev->io_mutex);
retval = -ENODEV;
goto error;
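
The yurex hunks above take a proper reference on the USB interface (usb_get_intf() in probe, usb_put_intf() in the release function) so the interface object outlives disconnect while the character device is still open. A minimal refcount sketch of that lifetime rule, using C11 atomics in place of kref; the struct and function names are made up for illustration.

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refs;
};

static struct obj *obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refs, 1);
	return o;
}

static void obj_put(struct obj *o)
{
	/* free only when the last reference is dropped */
	if (atomic_fetch_sub(&o->refs, 1) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));
	struct obj *held;

	atomic_init(&o->refs, 1);	/* creator's reference */
	held = obj_get(o);		/* e.g. taken in probe() */
	obj_put(o);			/* creator drops: object survives */
	obj_put(held);			/* last put: object is freed */
	return 0;
}
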
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index d1a0a35ecfff..0824099b905e 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -211,6 +211,7 @@ struct usbhs_priv;
/* DCPCTR */
#define BSTS (1 << 15) /* Buffer Status */
#define SUREQ (1 << 14) /* Sending SETUP Token */
+#define INBUFM (1 << 14) /* (PIPEnCTR) Transfer Buffer Monitor */
#define CSSTS (1 << 12) /* CSSTS Status */
#define ACLRM (1 << 9) /* Buffer Auto-Clear Mode */
#define SQCLR (1 << 8) /* Toggle Bit Clear */
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 2a01ceb71641..86637cd066cf 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -89,7 +89,7 @@ static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
list_del_init(&pkt->node);
}
-static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
+struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
{
return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node);
}
diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
index 88d1816bcda2..c3d3cc35cee0 100644
--- a/drivers/usb/renesas_usbhs/fifo.h
+++ b/drivers/usb/renesas_usbhs/fifo.h
@@ -97,5 +97,6 @@ void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
void *buf, int len, int zero, int sequence);
struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt);
void usbhs_pkt_start(struct usbhs_pipe *pipe);
+struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe);
#endif /* RENESAS_USB_FIFO_H */
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 4d571a5205e2..e5ef56991dba 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -722,8 +722,7 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
unsigned long flags;
-
- usbhsg_pipe_disable(uep);
+ int ret = 0;
dev_dbg(dev, "set halt %d (pipe %d)\n",
halt, usbhs_pipe_number(pipe));
@@ -731,6 +730,18 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
/******************** spin lock ********************/
usbhs_lock(priv, flags);
+ /*
+ * According to usb_ep_set_halt()'s description, this function should
+	 * return -EAGAIN if the IN endpoint has any queued packets or data.
+	 * Note that usbhs_pipe_is_dir_in() returns false if the pipe is an
+	 * IN endpoint in gadget mode.
+ */
+ if (!usbhs_pipe_is_dir_in(pipe) && (__usbhsf_pkt_get(pipe) ||
+ usbhs_pipe_contains_transmittable_data(pipe))) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
if (halt)
usbhs_pipe_stall(pipe);
else
@@ -741,10 +752,11 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
else
usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE);
+out:
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
- return 0;
+ return ret;
}
static int usbhsg_ep_set_halt(struct usb_ep *ep, int value)
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index c4922b96c93b..9e5afdde1adb 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -277,6 +277,21 @@ int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe)
return -EBUSY;
}
+bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe)
+{
+ u16 val;
+
+	/* Not supported for the DCP pipe */
+ if (usbhs_pipe_is_dcp(pipe))
+ return false;
+
+ val = usbhsp_pipectrl_get(pipe);
+ if (val & INBUFM)
+ return true;
+
+ return false;
+}
+
/*
* PID ctrl
*/
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index 3080423e600c..3b130529408b 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -83,6 +83,7 @@ void usbhs_pipe_clear(struct usbhs_pipe *pipe);
void usbhs_pipe_clear_without_sequence(struct usbhs_pipe *pipe,
int needs_bfre, int bfre_enable);
int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe);
+bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe);
void usbhs_pipe_enable(struct usbhs_pipe *pipe);
void usbhs_pipe_disable(struct usbhs_pipe *pipe);
void usbhs_pipe_stall(struct usbhs_pipe *pipe);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index f0688c44b04c..25e81faf4c24 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1030,6 +1030,9 @@ static const struct usb_device_id id_table_combined[] = {
/* EZPrototypes devices */
{ USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
{ USB_DEVICE_INTERFACE_NUMBER(UNJO_VID, UNJO_ISODEBUG_V1_PID, 1) },
+ /* Sienna devices */
+ { USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
+ { USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index f12d806220b4..22d66217cb41 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -39,6 +39,9 @@
#define FTDI_LUMEL_PD12_PID 0x6002
+/* Sienna Serial Interface by Secyourit GmbH */
+#define FTDI_SIENNA_PID 0x8348
+
/* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */
#define CYBER_CORTEX_AV_PID 0x8698
@@ -689,6 +692,12 @@
#define BANDB_ZZ_PROG1_USB_PID 0xBA02
/*
+ * Echelon USB Serial Interface
+ */
+#define ECHELON_VID 0x0920
+#define ECHELON_U20_PID 0x7500
+
+/*
* Intrepid Control Systems (http://www.intrepidcs.com/) ValueCAN and NeoVI
*/
#define INTREPID_VID 0x093C
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index d34779fe4a8d..e66a59ef43a1 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1741,8 +1741,8 @@ static struct urb *keyspan_setup_urb(struct usb_serial *serial, int endpoint,
ep_desc = find_ep(serial, endpoint);
if (!ep_desc) {
- /* leak the urb, something's wrong and the callers don't care */
- return urb;
+ usb_free_urb(urb);
+ return NULL;
}
if (usb_endpoint_xfer_int(ep_desc)) {
ep_type_name = "INT";
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 38e920ac7f82..06ab016be0b6 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -419,6 +419,7 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_PH8_AUDIO 0x0083
#define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
#define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
+#define CINTERION_PRODUCT_CLS8 0x00b0
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -1154,6 +1155,14 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
.driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1050, 0xff), /* Telit FN980 (rmnet) */
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1051, 0xff), /* Telit FN980 (MBIM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1052, 0xff), /* Telit FN980 (RNDIS) */
+ .driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1053, 0xff), /* Telit FN980 (ECM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1847,6 +1856,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff),
+ .driver_info = RSVD(0) | RSVD(4) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index a3179fea38c8..8f066bb55d7d 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -314,10 +314,7 @@ static void serial_cleanup(struct tty_struct *tty)
serial = port->serial;
owner = serial->type->driver.owner;
- mutex_lock(&serial->disc_mutex);
- if (!serial->disconnected)
- usb_autopm_put_interface(serial->interface);
- mutex_unlock(&serial->disc_mutex);
+ usb_autopm_put_interface(serial->interface);
usb_serial_put(serial);
module_put(owner);
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 96562744101c..5f61d9977a15 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -4409,18 +4409,20 @@ static int tcpm_fw_get_caps(struct tcpm_port *port,
/* USB data support is optional */
ret = fwnode_property_read_string(fwnode, "data-role", &cap_str);
if (ret == 0) {
- port->typec_caps.data = typec_find_port_data_role(cap_str);
- if (port->typec_caps.data < 0)
- return -EINVAL;
+ ret = typec_find_port_data_role(cap_str);
+ if (ret < 0)
+ return ret;
+ port->typec_caps.data = ret;
}
ret = fwnode_property_read_string(fwnode, "power-role", &cap_str);
if (ret < 0)
return ret;
- port->typec_caps.type = typec_find_port_power_role(cap_str);
- if (port->typec_caps.type < 0)
- return -EINVAL;
+ ret = typec_find_port_power_role(cap_str);
+ if (ret < 0)
+ return ret;
+ port->typec_caps.type = ret;
port->port_type = port->typec_caps.type;
if (port->port_type == TYPEC_PORT_SNK)
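
The tcpm hunk above fixes a sign bug: the typec_caps fields have unsigned underlying types, so testing them for < 0 after the assignment can never fire. The idiom is to capture the return value in a signed int, check it, and only then store it. A self-contained sketch under hypothetical names; -22 stands in for -EINVAL.

#include <stdio.h>

enum port_role { ROLE_NONE, ROLE_HOST, ROLE_DEVICE };

/* stand-in for a lookup that returns a negative errno on failure,
 * otherwise a small non-negative enum value */
static int find_role(const char *s)
{
	if (!s)
		return -22;		/* -EINVAL */
	return ROLE_HOST;
}

struct caps {
	enum port_role data;	/* underlying type may well be unsigned */
};

static int parse_caps(struct caps *caps, const char *s)
{
	/* Capture the result in a signed int *before* assigning it to
	 * the enum field; a "caps->data < 0" test after the assignment
	 * is useless if the field's underlying type is unsigned. */
	int ret = find_role(s);

	if (ret < 0)
		return ret;
	caps->data = ret;
	return 0;
}

int main(void)
{
	struct caps c;

	printf("%d\n", parse_caps(&c, NULL));	/* -22, error propagated */
	printf("%d\n", parse_caps(&c, "host"));	/* 0, c.data == ROLE_HOST */
	return 0;
}
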
diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
index 6c103697c582..d99700cb4dca 100644
--- a/drivers/usb/typec/ucsi/displayport.c
+++ b/drivers/usb/typec/ucsi/displayport.c
@@ -75,6 +75,8 @@ static int ucsi_displayport_enter(struct typec_altmode *alt)
if (cur != 0xff) {
mutex_unlock(&dp->con->lock);
+ if (dp->con->port_altmode[cur] == alt)
+ return 0;
return -EBUSY;
}
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 907e20e1a71e..d772fce51905 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -195,7 +195,6 @@ struct ucsi_ccg {
/* fw build with vendor information */
u16 fw_build;
- bool run_isr; /* flag to call ISR routine during resume */
struct work_struct pm_work;
};
@@ -224,18 +223,6 @@ static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
if (quirks && quirks->max_read_len)
max_read_len = quirks->max_read_len;
- if (uc->fw_build == CCG_FW_BUILD_NVIDIA &&
- uc->fw_version <= CCG_OLD_FW_VERSION) {
- mutex_lock(&uc->lock);
- /*
- * Do not schedule pm_work to run ISR in
- * ucsi_ccg_runtime_resume() after pm_runtime_get_sync()
- * since we are already in ISR path.
- */
- uc->run_isr = false;
- mutex_unlock(&uc->lock);
- }
-
pm_runtime_get_sync(uc->dev);
while (rem_len > 0) {
msgs[1].buf = &data[len - rem_len];
@@ -278,18 +265,6 @@ static int ccg_write(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
msgs[0].len = len + sizeof(rab);
msgs[0].buf = buf;
- if (uc->fw_build == CCG_FW_BUILD_NVIDIA &&
- uc->fw_version <= CCG_OLD_FW_VERSION) {
- mutex_lock(&uc->lock);
- /*
- * Do not schedule pm_work to run ISR in
- * ucsi_ccg_runtime_resume() after pm_runtime_get_sync()
- * since we are already in ISR path.
- */
- uc->run_isr = false;
- mutex_unlock(&uc->lock);
- }
-
pm_runtime_get_sync(uc->dev);
status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
if (status < 0) {
@@ -1130,7 +1105,6 @@ static int ucsi_ccg_probe(struct i2c_client *client,
uc->ppm.sync = ucsi_ccg_sync;
uc->dev = dev;
uc->client = client;
- uc->run_isr = true;
mutex_init(&uc->lock);
INIT_WORK(&uc->work, ccg_update_firmware);
INIT_WORK(&uc->pm_work, ccg_pm_workaround_work);
@@ -1188,6 +1162,8 @@ static int ucsi_ccg_probe(struct i2c_client *client,
pm_runtime_set_active(uc->dev);
pm_runtime_enable(uc->dev);
+ pm_runtime_use_autosuspend(uc->dev);
+ pm_runtime_set_autosuspend_delay(uc->dev, 5000);
pm_runtime_idle(uc->dev);
return 0;
@@ -1229,7 +1205,6 @@ static int ucsi_ccg_runtime_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct ucsi_ccg *uc = i2c_get_clientdata(client);
- bool schedule = true;
/*
* Firmware version 3.1.10 or earlier, built for NVIDIA has known issue
@@ -1237,17 +1212,8 @@ static int ucsi_ccg_runtime_resume(struct device *dev)
* Schedule a work to call ISR as a workaround.
*/
if (uc->fw_build == CCG_FW_BUILD_NVIDIA &&
- uc->fw_version <= CCG_OLD_FW_VERSION) {
- mutex_lock(&uc->lock);
- if (!uc->run_isr) {
- uc->run_isr = true;
- schedule = false;
- }
- mutex_unlock(&uc->lock);
-
- if (schedule)
- schedule_work(&uc->pm_work);
- }
+ uc->fw_version <= CCG_OLD_FW_VERSION)
+ schedule_work(&uc->pm_work);
return 0;
}
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index c31d17d05810..2dc58766273a 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -61,6 +61,7 @@ struct usb_skel {
spinlock_t err_lock; /* lock for errors */
struct kref kref;
struct mutex io_mutex; /* synchronize I/O with disconnect */
+ unsigned long disconnected:1;
wait_queue_head_t bulk_in_wait; /* to wait for an ongoing read */
};
#define to_skel_dev(d) container_of(d, struct usb_skel, kref)
@@ -73,6 +74,7 @@ static void skel_delete(struct kref *kref)
struct usb_skel *dev = to_skel_dev(kref);
usb_free_urb(dev->bulk_in_urb);
+ usb_put_intf(dev->interface);
usb_put_dev(dev->udev);
kfree(dev->bulk_in_buffer);
kfree(dev);
@@ -124,10 +126,7 @@ static int skel_release(struct inode *inode, struct file *file)
return -ENODEV;
/* allow the device to be autosuspended */
- mutex_lock(&dev->io_mutex);
- if (dev->interface)
- usb_autopm_put_interface(dev->interface);
- mutex_unlock(&dev->io_mutex);
+ usb_autopm_put_interface(dev->interface);
/* decrement the count on our device */
kref_put(&dev->kref, skel_delete);
@@ -231,8 +230,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count,
dev = file->private_data;
- /* if we cannot read at all, return EOF */
- if (!dev->bulk_in_urb || !count)
+ if (!count)
return 0;
/* no concurrent readers */
@@ -240,7 +238,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count,
if (rv < 0)
return rv;
- if (!dev->interface) { /* disconnect() was called */
+ if (dev->disconnected) { /* disconnect() was called */
rv = -ENODEV;
goto exit;
}
@@ -422,7 +420,7 @@ static ssize_t skel_write(struct file *file, const char *user_buffer,
/* this lock makes sure we don't submit URBs to gone devices */
mutex_lock(&dev->io_mutex);
- if (!dev->interface) { /* disconnect() was called */
+ if (dev->disconnected) { /* disconnect() was called */
mutex_unlock(&dev->io_mutex);
retval = -ENODEV;
goto error;
@@ -507,7 +505,7 @@ static int skel_probe(struct usb_interface *interface,
init_waitqueue_head(&dev->bulk_in_wait);
dev->udev = usb_get_dev(interface_to_usbdev(interface));
- dev->interface = interface;
+ dev->interface = usb_get_intf(interface);
/* set up the endpoint information */
/* use only the first bulk-in and bulk-out endpoints */
@@ -573,9 +571,10 @@ static void skel_disconnect(struct usb_interface *interface)
/* prevent more I/O from starting */
mutex_lock(&dev->io_mutex);
- dev->interface = NULL;
+ dev->disconnected = 1;
mutex_unlock(&dev->io_mutex);
+ usb_kill_urb(dev->bulk_in_urb);
usb_kill_anchored_urbs(&dev->submitted);
/* decrement our usage count */
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 585a84d319bd..65850e9c7190 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -1195,12 +1195,12 @@ static int vhci_start(struct usb_hcd *hcd)
if (id == 0 && usb_hcd_is_primary_hcd(hcd)) {
err = vhci_init_attr_group();
if (err) {
- pr_err("init attr group\n");
+ dev_err(hcd_dev(hcd), "init attr group failed, err = %d\n", err);
return err;
}
err = sysfs_create_group(&hcd_dev(hcd)->kobj, &vhci_attr_group);
if (err) {
- pr_err("create sysfs files\n");
+ dev_err(hcd_dev(hcd), "create sysfs files failed, err = %d\n", err);
vhci_finish_attr_group();
return err;
}
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 7804869c6a31..056308008288 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -161,6 +161,7 @@ static int vhost_test_release(struct inode *inode, struct file *f)
vhost_test_stop(n, &private);
vhost_test_flush(n);
+ vhost_dev_stop(&n->dev);
vhost_dev_cleanup(&n->dev);
/* We do an extra flush before freeing memory,
* since jobs can re-queue themselves. */
@@ -237,6 +238,7 @@ static long vhost_test_reset_owner(struct vhost_test *n)
}
vhost_test_stop(n, &priv);
vhost_test_flush(n);
+ vhost_dev_stop(&n->dev);
vhost_dev_reset_owner(&n->dev, umem);
done:
mutex_unlock(&n->dev.mutex);
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index 75fd140b02ff..43c391626a00 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -220,6 +220,8 @@ static int hgcm_call_preprocess_linaddr(
if (!bounce_buf)
return -ENOMEM;
+ *bounce_buf_ret = bounce_buf;
+
if (copy_in) {
ret = copy_from_user(bounce_buf, (void __user *)buf, len);
if (ret)
@@ -228,7 +230,6 @@ static int hgcm_call_preprocess_linaddr(
memset(bounce_buf, 0, len);
}
- *bounce_buf_ret = bounce_buf;
hgcm_call_add_pagelist_size(bounce_buf, len, extra);
return 0;
}
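
The vboxguest fix moves the *bounce_buf_ret assignment ahead of the fallible copy step, so the caller's common error path frees the buffer instead of it leaking when copy_from_user() fails. A self-contained sketch of that ownership-transfer ordering; the names are invented and -12/-14 stand in for -ENOMEM/-EFAULT.

#include <stdlib.h>
#include <string.h>

/* stand-in for the fallible copy_from_user() step */
static int fallible_fill(void *dst, const void *src, size_t len)
{
	if (!src)
		return -14;		/* -EFAULT */
	memcpy(dst, src, len);
	return 0;
}

static int prepare(void **out, const void *src, size_t len)
{
	void *buf = malloc(len);

	if (!buf)
		return -12;		/* -ENOMEM */

	/* Hand the buffer to the caller *before* the step that can fail:
	 * on error the caller's cleanup path then frees it, instead of it
	 * leaking because *out was never set. */
	*out = buf;

	return fallible_fill(buf, src, len);
}

int main(void)
{
	void *buf = NULL;

	if (prepare(&buf, NULL, 64) < 0)
		free(buf);	/* safe: prepare() already published it */
	return 0;
}
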
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index ebed495b9e69..b7847636501d 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -103,6 +103,7 @@ config W1_SLAVE_DS2438
config W1_SLAVE_DS250X
tristate "512b/1kb/16kb EPROM family support"
+ select CRC16
help
Say Y here if you want to use a 1-wire
512b/1kb/16kb EPROM family device (DS250x).
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index a446a7221e13..81401f386c9c 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -22,6 +22,7 @@
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -34,9 +35,6 @@
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/refcount.h>
-#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
-#include <linux/of_device.h>
-#endif
#include <xen/xen.h>
#include <xen/grant_table.h>
@@ -625,14 +623,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
flip->private_data = priv;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
priv->dma_dev = gntdev_miscdev.this_device;
-
- /*
- * The device is not spawn from a device tree, so arch_setup_dma_ops
- * is not called, thus leaving the device with dummy DMA ops.
- * Fix this by calling of_dma_configure() with a NULL node to set
- * default DMA ops.
- */
- of_dma_configure(priv->dma_dev, NULL, true);
+ dma_coerce_mask_and_coherent(priv->dma_dev, DMA_BIT_MASK(64));
#endif
pr_debug("priv %p\n", priv);
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 7ea6fb6a2e5d..49b381e104ef 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1363,8 +1363,7 @@ static int gnttab_setup(void)
if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
if (gnttab_shared.addr == NULL) {
- pr_warn("gnttab share frames (addr=0x%08lx) is not mapped!\n",
- (unsigned long)xen_auto_xlat_grant_frames.vaddr);
+			pr_warn("gnttab shared frames are not mapped!\n");
return -ENOMEM;
}
}
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 69a626b0e594..c57c71b7d53d 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -775,7 +775,7 @@ static int pvcalls_back_poll(struct xenbus_device *dev,
mappass->reqcopy = *req;
icsk = inet_csk(mappass->sock->sk);
queue = &icsk->icsk_accept_queue;
- data = queue->rskq_accept_head != NULL;
+ data = READ_ONCE(queue->rskq_accept_head) != NULL;
if (data) {
mappass->reqcopy.cmd = 0;
ret = 0;
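
The pvcalls-back hunk wraps a lockless peek at rskq_accept_head in READ_ONCE() so the compiler cannot tear, cache, or refetch the load. In portable C11 the closest analogue is a relaxed atomic load, sketched below with an illustrative field name.

#include <stdatomic.h>
#include <stdio.h>

/* A field written under a lock by one side and peeked at locklessly by
 * another. A plain load of such a field is a data race; READ_ONCE()
 * corresponds roughly to a relaxed atomic load in C11. */
static _Atomic(void *) accept_head;

static int have_pending(void)
{
	return atomic_load_explicit(&accept_head,
				    memory_order_relaxed) != NULL;
}

int main(void)
{
	atomic_store_explicit(&accept_head, (void *)0x1,
			      memory_order_relaxed);
	printf("pending=%d\n", have_pending());
	return 0;
}
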
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 8fe4eb7e5045..27e5b269e729 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1591,7 +1591,6 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct page **pages = NULL;
- struct extent_state *cached_state = NULL;
struct extent_changeset *data_reserved = NULL;
u64 release_bytes = 0;
u64 lockstart;
@@ -1611,6 +1610,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
return -ENOMEM;
while (iov_iter_count(i) > 0) {
+ struct extent_state *cached_state = NULL;
size_t offset = offset_in_page(pos);
size_t sector_offset;
size_t write_bytes = min(iov_iter_count(i),
@@ -1758,9 +1758,20 @@ again:
if (copied > 0)
ret = btrfs_dirty_pages(inode, pages, dirty_pages,
pos, copied, &cached_state);
+
+ /*
+ * If we have not locked the extent range, because the range's
+ * start offset is >= i_size, we might still have a non-NULL
+ * cached extent state, acquired while marking the extent range
+ * as delalloc through btrfs_dirty_pages(). Therefore free any
+ * possible cached extent state to avoid a memory leak.
+ */
if (extents_locked)
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
lockstart, lockend, &cached_state);
+ else
+ free_extent_state(cached_state);
+
btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes,
true);
if (ret) {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a0546401bc0a..0f2754eaa05b 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6305,13 +6305,16 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
u32 sizes[2];
int nitems = name ? 2 : 1;
unsigned long ptr;
+ unsigned int nofs_flag;
int ret;
path = btrfs_alloc_path();
if (!path)
return ERR_PTR(-ENOMEM);
+ nofs_flag = memalloc_nofs_save();
inode = new_inode(fs_info->sb);
+ memalloc_nofs_restore(nofs_flag);
if (!inode) {
btrfs_free_path(path);
return ERR_PTR(-ENOMEM);
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index e87cbdad02a3..b57f3618e58e 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -500,7 +500,7 @@ static int process_leaf(struct btrfs_root *root,
struct btrfs_extent_data_ref *dref;
struct btrfs_shared_data_ref *sref;
u32 count;
- int i = 0, tree_block_level = 0, ret;
+ int i = 0, tree_block_level = 0, ret = 0;
struct btrfs_key key;
int nritems = btrfs_header_nritems(leaf);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index f3215028235c..123ac54af071 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -5085,7 +5085,7 @@ static int clone_range(struct send_ctx *sctx,
struct btrfs_path *path;
struct btrfs_key key;
int ret;
- u64 clone_src_i_size;
+ u64 clone_src_i_size = 0;
/*
* Prevent cloning from a zero offset with a length matching the sector
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 29b82a795522..8a6cc600bf18 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2932,7 +2932,8 @@ out:
* in the tree of log roots
*/
static int update_log_root(struct btrfs_trans_handle *trans,
- struct btrfs_root *log)
+ struct btrfs_root *log,
+ struct btrfs_root_item *root_item)
{
struct btrfs_fs_info *fs_info = log->fs_info;
int ret;
@@ -2940,10 +2941,10 @@ static int update_log_root(struct btrfs_trans_handle *trans,
if (log->log_transid == 1) {
/* insert root item on the first sync */
ret = btrfs_insert_root(trans, fs_info->log_root_tree,
- &log->root_key, &log->root_item);
+ &log->root_key, root_item);
} else {
ret = btrfs_update_root(trans, fs_info->log_root_tree,
- &log->root_key, &log->root_item);
+ &log->root_key, root_item);
}
return ret;
}
@@ -3041,6 +3042,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *log = root->log_root;
struct btrfs_root *log_root_tree = fs_info->log_root_tree;
+ struct btrfs_root_item new_root_item;
int log_transid = 0;
struct btrfs_log_ctx root_log_ctx;
struct blk_plug plug;
@@ -3104,18 +3106,26 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
goto out;
}
+ /*
+ * We _must_ update under the root->log_mutex in order to make sure we
+ * have a consistent view of the log root we are trying to commit at
+ * this moment.
+ *
+	 * We _must_ take a local copy, because we are not holding the
+ * log_root_tree->log_mutex yet. This is important because when we
+ * commit the log_root_tree we must have a consistent view of the
+ * log_root_tree when we update the super block to point at the
+ * log_root_tree bytenr. If we update the log_root_tree here we'll race
+ * with the commit and possibly point at the new block which we may not
+ * have written out.
+ */
btrfs_set_root_node(&log->root_item, log->node);
+ memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
root->log_transid++;
log->log_transid = root->log_transid;
root->log_start_pid = 0;
/*
- * Update or create log root item under the root's log_mutex to prevent
- * races with concurrent log syncs that can lead to failure to update
- * log root item because it was not created yet.
- */
- ret = update_log_root(trans, log);
- /*
* IO has been started, blocks of the log tree have WRITTEN flag set
* in their headers. new modifications of the log will be written to
* new positions. so it's safe to allow log writers to go in.
@@ -3135,6 +3145,14 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
mutex_unlock(&log_root_tree->log_mutex);
mutex_lock(&log_root_tree->log_mutex);
+
+ /*
+ * Now we are safe to update the log_root_tree because we're under the
+ * log_mutex, and we're a current writer so we're holding the commit
+ * open until we drop the log_mutex.
+ */
+ ret = update_log_root(trans, log, &new_root_item);
+
if (atomic_dec_and_test(&log_root_tree->log_writers)) {
/* atomic_dec_and_test implies a barrier */
cond_wake_up_nomb(&log_root_tree->log_writer_wait);
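
The tree-log fix above snapshots log->root_item into a local under root->log_mutex and commits the copy later, so a racing log commit cannot move the root node between the two steps. A minimal sketch of the snapshot-under-lock idiom; the structures and lock names below are stand-ins, not btrfs API.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct root_item { unsigned long bytenr; };

static pthread_mutex_t log_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct root_item live_item;	/* updated by every log commit */

/* Consumer works only on the private copy, so a concurrent update of
 * live_item cannot be half-observed. */
static void commit_root(const struct root_item *item)
{
	printf("committing bytenr %lu\n", item->bytenr);
}

static void sync_log(void)
{
	struct root_item snapshot;

	/* take a consistent view under the lock that serializes writers */
	pthread_mutex_lock(&log_mutex);
	live_item.bytenr++;		/* this sync's own update */
	memcpy(&snapshot, &live_item, sizeof(snapshot));
	pthread_mutex_unlock(&log_mutex);

	/* ...then use the copy later, outside that lock: even if another
	 * sync bumps live_item meanwhile, we commit what we saw. */
	commit_root(&snapshot);
}

int main(void)
{
	sync_log();
	sync_log();
	return 0;
}
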
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index cdd7af424033..bdfe4493e43a 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3845,7 +3845,11 @@ static int alloc_profile_is_valid(u64 flags, int extended)
return !extended; /* "0" is valid for usual profiles */
/* true if exactly one bit set */
- return is_power_of_2(flags);
+ /*
+ * Don't use is_power_of_2(unsigned long) because it won't work
+ * for the single profile (1ULL << 48) on 32-bit CPUs.
+ */
+ return flags != 0 && (flags & (flags - 1)) == 0;
}
static inline int balance_need_close(struct btrfs_fs_info *fs_info)
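
The volumes.c hunk open-codes the exactly-one-bit-set test because is_power_of_2() takes an unsigned long, which is 32 bits on 32-bit CPUs and would truncate a u64 profile bit such as 1ULL << 48 to zero. A small runnable check of the width-correct form:

#include <inttypes.h>
#include <stdio.h>

/* Exactly-one-bit-set test that stays width-correct for u64. */
static int exactly_one_bit(uint64_t flags)
{
	return flags != 0 && (flags & (flags - 1)) == 0;
}

int main(void)
{
	uint64_t single = (uint64_t)1 << 48;	/* a high profile bit */

	printf("%d\n", exactly_one_bit(single));	/* 1 */
	printf("%d\n", exactly_one_bit(single | 1));	/* 0: two bits */
	printf("%d\n", exactly_one_bit(0));		/* 0 */
	return 0;
}
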
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index a8a8f84f3bbf..a5163296d9d9 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -384,8 +384,8 @@ static int parse_reply_info_readdir(void **p, void *end,
}
done:
- if (*p != end)
- goto bad;
+ /* Skip over any unrecognized fields */
+ *p = end;
return 0;
bad:
@@ -406,12 +406,10 @@ static int parse_reply_info_filelock(void **p, void *end,
goto bad;
info->filelock_reply = *p;
- *p += sizeof(*info->filelock_reply);
- if (unlikely(*p != end))
- goto bad;
+ /* Skip over any unrecognized fields */
+ *p = end;
return 0;
-
bad:
return -EIO;
}
@@ -425,18 +423,21 @@ static int parse_reply_info_create(void **p, void *end,
{
if (features == (u64)-1 ||
(features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
+ /* Malformed reply? */
if (*p == end) {
info->has_create_ino = false;
} else {
info->has_create_ino = true;
- info->ino = ceph_decode_64(p);
+ ceph_decode_64_safe(p, end, info->ino, bad);
}
+ } else {
+ if (*p != end)
+ goto bad;
}
- if (unlikely(*p != end))
- goto bad;
+ /* Skip over any unrecognized fields */
+ *p = end;
return 0;
-
bad:
return -EIO;
}
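
The ceph hunks switch to bounds-checked decoding (ceph_decode_64_safe()) and deliberately skip any trailing unrecognized fields, so replies from newer servers do not break older clients. A hedged sketch of both halves of that pattern; decode_u64() and parse_reply() are invented names and -5 stands in for -EIO.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Only consume a u64 if the buffer actually has one. */
static int decode_u64(const uint8_t **p, const uint8_t *end, uint64_t *out)
{
	if (end - *p < (ptrdiff_t)sizeof(*out))
		return -5;	/* -EIO: malformed reply */
	memcpy(out, *p, sizeof(*out));
	*p += sizeof(*out);
	return 0;
}

static int parse_reply(const uint8_t *buf, size_t len, uint64_t *ino)
{
	const uint8_t *p = buf, *end = buf + len;

	if (decode_u64(&p, end, ino))
		return -5;

	p = end;	/* skip over any unrecognized trailing fields */
	return 0;
}

int main(void)
{
	uint8_t buf[12] = { 0 };	/* 8-byte ino plus 4 unknown bytes */
	uint64_t ino;

	printf("%d\n", parse_reply(buf, sizeof(buf), &ino));	/* 0 */
	printf("%d\n", parse_reply(buf, 4, &ino));		/* -5 */
	return 0;
}
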
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 2e9c7f493f99..c049c7b3aa87 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -169,18 +169,26 @@ cifs_read_super(struct super_block *sb)
else
sb->s_maxbytes = MAX_NON_LFS;
- /* BB FIXME fix time_gran to be larger for LANMAN sessions */
- sb->s_time_gran = 100;
-
- if (tcon->unix_ext) {
- ts = cifs_NTtimeToUnix(0);
+ /* Some very old servers like DOS and OS/2 used 2 second granularity */
+ if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
+ ((tcon->ses->capabilities &
+ tcon->ses->server->vals->cap_nt_find) == 0) &&
+ !tcon->unix_ext) {
+ sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
+ ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
sb->s_time_min = ts.tv_sec;
- ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
+ ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
+ cpu_to_le16(SMB_TIME_MAX), 0);
sb->s_time_max = ts.tv_sec;
} else {
- ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
+ /*
+ * Almost every server, including all SMB2+, uses DCE TIME
+ * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
+ */
+ sb->s_time_gran = 100;
+ ts = cifs_NTtimeToUnix(0);
sb->s_time_min = ts.tv_sec;
- ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX), cpu_to_le16(SMB_TIME_MAX), 0);
+ ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
sb->s_time_max = ts.tv_sec;
}
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 2e960e1049db..50dfd9049370 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1210,7 +1210,7 @@ struct cifs_search_info {
bool smallBuf:1; /* so we know which buf_release function to call */
};
-#define ACL_NO_MODE -1
+#define ACL_NO_MODE ((umode_t)(-1))
struct cifs_open_parms {
struct cifs_tcon *tcon;
struct cifs_sb_info *cifs_sb;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 2850c3ce4391..a64dfa95a925 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -4264,7 +4264,7 @@ static int mount_get_conns(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
server->ops->qfs_tcon(*xid, tcon);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) {
if (tcon->fsDevInfo.DeviceCharacteristics &
- FILE_READ_ONLY_DEVICE)
+ cpu_to_le32(FILE_READ_ONLY_DEVICE))
cifs_dbg(VFS, "mounted to read only share\n");
else if ((cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_RW_CACHE) == 0)
@@ -4445,7 +4445,7 @@ static int setup_dfs_tgt_conn(const char *path,
int rc;
struct dfs_info3_param ref = {0};
char *mdata = NULL, *fake_devname = NULL;
- struct smb_vol fake_vol = {0};
+ struct smb_vol fake_vol = {NULL};
cifs_dbg(FYI, "%s: dfs path: %s\n", __func__, path);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index dd5ac841aefa..7ce689d31aa2 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -738,10 +738,16 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
static int
cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
{
+ struct inode *inode;
+
if (flags & LOOKUP_RCU)
return -ECHILD;
if (d_really_is_positive(direntry)) {
+ inode = d_inode(direntry);
+ if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
+ CIFS_I(inode)->time = 0; /* force reval */
+
if (cifs_revalidate_dentry(direntry))
return 0;
else {
@@ -752,7 +758,7 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
* attributes will have been updated by
* cifs_revalidate_dentry().
*/
- if (IS_AUTOMOUNT(d_inode(direntry)) &&
+ if (IS_AUTOMOUNT(inode) &&
!(direntry->d_flags & DCACHE_NEED_AUTOMOUNT)) {
spin_lock(&direntry->d_lock);
direntry->d_flags |= DCACHE_NEED_AUTOMOUNT;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 4b95700c507c..5ad15de2bb4f 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -253,6 +253,12 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
xid, fid);
+ if (rc) {
+ server->ops->close(xid, tcon, fid);
+ if (rc == -ESTALE)
+ rc = -EOPENSTALE;
+ }
+
out:
kfree(buf);
return rc;
@@ -1840,13 +1846,12 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
{
struct cifsFileInfo *open_file = NULL;
struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
- struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
/* only filter by fsuid on multiuser mounts */
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
fsuid_only = false;
- spin_lock(&tcon->open_file_lock);
+ spin_lock(&cifs_inode->open_file_lock);
/* we could simply get the first_list_entry since write-only entries
are always at the end of the list but since the first entry might
have a close pending, we go through the whole list */
@@ -1858,7 +1863,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
/* found a good file */
/* lock it so it will not be closed on us */
cifsFileInfo_get(open_file);
- spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_inode->open_file_lock);
return open_file;
} /* else might as well continue, and look for
another, or simply have the caller reopen it
@@ -1866,7 +1871,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
} else /* write only file */
break; /* write only files are last so must be done */
}
- spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_inode->open_file_lock);
return NULL;
}
@@ -1877,7 +1882,6 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
{
struct cifsFileInfo *open_file, *inv_file = NULL;
struct cifs_sb_info *cifs_sb;
- struct cifs_tcon *tcon;
bool any_available = false;
int rc = -EBADF;
unsigned int refind = 0;
@@ -1897,16 +1901,15 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
}
cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
- tcon = cifs_sb_master_tcon(cifs_sb);
/* only filter by fsuid on multiuser mounts */
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
fsuid_only = false;
- spin_lock(&tcon->open_file_lock);
+ spin_lock(&cifs_inode->open_file_lock);
refind_writable:
if (refind > MAX_REOPEN_ATT) {
- spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_inode->open_file_lock);
return rc;
}
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
@@ -1918,7 +1921,7 @@ refind_writable:
if (!open_file->invalidHandle) {
/* found a good writable file */
cifsFileInfo_get(open_file);
- spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_inode->open_file_lock);
*ret_file = open_file;
return 0;
} else {
@@ -1938,7 +1941,7 @@ refind_writable:
cifsFileInfo_get(inv_file);
}
- spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_inode->open_file_lock);
if (inv_file) {
rc = cifs_reopen_file(inv_file, false);
@@ -1953,7 +1956,7 @@ refind_writable:
cifsFileInfo_put(inv_file);
++refind;
inv_file = NULL;
- spin_lock(&tcon->open_file_lock);
+ spin_lock(&cifs_inode->open_file_lock);
goto refind_writable;
}
@@ -4461,17 +4464,15 @@ static int cifs_readpage(struct file *file, struct page *page)
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
struct cifsFileInfo *open_file;
- struct cifs_tcon *tcon =
- cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
- spin_lock(&tcon->open_file_lock);
+ spin_lock(&cifs_inode->open_file_lock);
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
- spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_inode->open_file_lock);
return 1;
}
}
- spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_inode->open_file_lock);
return 0;
}
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 3bae2e53f0b8..5dcc95b38310 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -414,6 +414,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
/* if uniqueid is different, return error */
if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
CIFS_I(*pinode)->uniqueid != fattr.cf_uniqueid)) {
+ CIFS_I(*pinode)->time = 0; /* force reval */
rc = -ESTALE;
goto cgiiu_exit;
}
@@ -421,6 +422,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
/* if filetype is different, return error */
if (unlikely(((*pinode)->i_mode & S_IFMT) !=
(fattr.cf_mode & S_IFMT))) {
+ CIFS_I(*pinode)->time = 0; /* force reval */
rc = -ESTALE;
goto cgiiu_exit;
}
@@ -933,6 +935,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
/* if uniqueid is different, return error */
if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
CIFS_I(*inode)->uniqueid != fattr.cf_uniqueid)) {
+ CIFS_I(*inode)->time = 0; /* force reval */
rc = -ESTALE;
goto cgii_exit;
}
@@ -940,6 +943,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
/* if filetype is different, return error */
if (unlikely(((*inode)->i_mode & S_IFMT) !=
(fattr.cf_mode & S_IFMT))) {
+ CIFS_I(*inode)->time = 0; /* force reval */
rc = -ESTALE;
goto cgii_exit;
}
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 49c17ee18254..9b41436fb8db 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -117,10 +117,6 @@ static const struct smb_to_posix_error mapping_table_ERRSRV[] = {
{0, 0}
};
-static const struct smb_to_posix_error mapping_table_ERRHRD[] = {
- {0, 0}
-};
-
/*
* Convert a string containing text IPv4 or IPv6 address to binary form.
*
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 85f9d614d968..05149862aea4 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -751,8 +751,8 @@ add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
unsigned int num = *num_iovec;
iov[num].iov_base = create_posix_buf(mode);
- if (mode == -1)
- cifs_dbg(VFS, "illegal mode\n"); /* BB REMOVEME */
+ if (mode == ACL_NO_MODE)
+ cifs_dbg(FYI, "illegal mode\n");
if (iov[num].iov_base == NULL)
return -ENOMEM;
iov[num].iov_len = sizeof(struct create_posix);
@@ -2521,11 +2521,8 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
return rc;
}
- /* TODO: add handling for the mode on create */
- if (oparms->disposition == FILE_CREATE)
- cifs_dbg(VFS, "mode is 0x%x\n", oparms->mode); /* BB REMOVEME */
-
- if ((oparms->disposition == FILE_CREATE) && (oparms->mode != -1)) {
+ if ((oparms->disposition == FILE_CREATE) &&
+ (oparms->mode != ACL_NO_MODE)) {
if (n_iov > 2) {
struct create_context *ccontext =
(struct create_context *)iov[n_iov-1].iov_base;
@@ -3217,7 +3214,8 @@ SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
- req->OutputBufferLength = SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE;
+ req->OutputBufferLength =
+ cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE);
req->CompletionFilter = cpu_to_le32(completion_filter);
if (watch_tree)
req->Flags = cpu_to_le16(SMB2_WATCH_TREE);
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index da3a6d580808..71b2930b8e0b 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -150,6 +150,10 @@ extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
bool is_fsctl, char *in_data, u32 indatalen,
__u32 max_response_size);
extern void SMB2_ioctl_free(struct smb_rqst *rqst);
+extern int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid, bool watch_tree,
+ u32 completion_filter);
+
extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id);
extern int SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
diff --git a/fs/direct-io.c b/fs/direct-io.c
index ae196784f487..9329ced91f1d 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -241,9 +241,8 @@ void dio_warn_stale_pagecache(struct file *filp)
}
}
-/**
+/*
* dio_complete() - called when all DIO BIO I/O has been completed
- * @offset: the byte offset in the file of the completed operation
*
* This drops i_dio_count, lets interested parties know that a DIO operation
* has completed, and calculates the resulting return code for the operation.
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 8aaa7eec7b74..8461a6322039 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -164,8 +164,13 @@ static void finish_writeback_work(struct bdi_writeback *wb,
if (work->auto_free)
kfree(work);
- if (done && atomic_dec_and_test(&done->cnt))
- wake_up_all(done->waitq);
+ if (done) {
+ wait_queue_head_t *waitq = done->waitq;
+
+ /* @done can't be accessed after the following dec */
+ if (atomic_dec_and_test(&done->cnt))
+ wake_up_all(waitq);
+ }
}
static void wb_queue_work(struct bdi_writeback *wb,
@@ -900,7 +905,7 @@ restart:
* cgroup_writeback_by_id - initiate cgroup writeback from bdi and memcg IDs
* @bdi_id: target bdi id
* @memcg_id: target memcg css id
- * @nr_pages: number of pages to write, 0 for best-effort dirty flushing
+ * @nr: number of pages to write, 0 for best-effort dirty flushing
* @reason: reason why some writeback work initiated
* @done: target wb_completion
*
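
The fs-writeback fix caches done->waitq in a local before the atomic decrement, because the completion's owner may free the object the instant the count hits zero. The same rule in miniature, with C11 atomics and illustrative names:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct wb_completion {
	atomic_int cnt;
	const char *waitq;	/* whatever the waker needs afterwards */
};

/* Once cnt reaches zero the waiter may free @done, so grab everything
 * we need from it *before* the decrement and never touch it again. */
static void finish_work(struct wb_completion *done)
{
	const char *waitq = done->waitq;	/* safe: we hold a count */

	if (atomic_fetch_sub(&done->cnt, 1) == 1)
		printf("waking %s\n", waitq);	/* @done is off-limits */
}

int main(void)
{
	struct wb_completion *done = malloc(sizeof(*done));

	atomic_init(&done->cnt, 1);
	done->waitq = "writeback waitq";
	finish_work(done);
	free(done);
	return 0;
}
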
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8a0381f1a43b..67dbe0201e0d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -322,6 +322,8 @@ struct io_kiocb {
#define REQ_F_FAIL_LINK 256 /* fail rest of links */
#define REQ_F_SHADOW_DRAIN 512 /* link-drain shadow req */
#define REQ_F_TIMEOUT 1024 /* timeout request */
+#define REQ_F_ISREG 2048 /* regular file */
+#define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */
u64 user_data;
u32 result;
u32 sequence;
@@ -415,27 +417,27 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
return ctx;
}
+static inline bool __io_sequence_defer(struct io_ring_ctx *ctx,
+ struct io_kiocb *req)
+{
+ return req->sequence != ctx->cached_cq_tail + ctx->rings->sq_dropped;
+}
+
static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
struct io_kiocb *req)
{
- /* timeout requests always honor sequence */
- if (!(req->flags & REQ_F_TIMEOUT) &&
- (req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
+ if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
return false;
- return req->sequence != ctx->cached_cq_tail + ctx->rings->sq_dropped;
+ return __io_sequence_defer(ctx, req);
}
-static struct io_kiocb *__io_get_deferred_req(struct io_ring_ctx *ctx,
- struct list_head *list)
+static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
{
struct io_kiocb *req;
- if (list_empty(list))
- return NULL;
-
- req = list_first_entry(list, struct io_kiocb, list);
- if (!io_sequence_defer(ctx, req)) {
+ req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
+ if (req && !io_sequence_defer(ctx, req)) {
list_del_init(&req->list);
return req;
}
@@ -443,14 +445,17 @@ static struct io_kiocb *__io_get_deferred_req(struct io_ring_ctx *ctx,
return NULL;
}
-static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
-{
- return __io_get_deferred_req(ctx, &ctx->defer_list);
-}
-
static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
{
- return __io_get_deferred_req(ctx, &ctx->timeout_list);
+ struct io_kiocb *req;
+
+ req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
+ if (req && !__io_sequence_defer(ctx, req)) {
+ list_del_init(&req->list);
+ return req;
+ }
+
+ return NULL;
}
static void __io_commit_cqring(struct io_ring_ctx *ctx)
@@ -591,14 +596,6 @@ static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
io_cqring_ev_posted(ctx);
}
-static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
-{
- percpu_ref_put_many(&ctx->refs, refs);
-
- if (waitqueue_active(&ctx->wait))
- wake_up(&ctx->wait);
-}
-
static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
struct io_submit_state *state)
{
@@ -646,7 +643,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
req->result = 0;
return req;
out:
- io_ring_drop_ctx_refs(ctx, 1);
+ percpu_ref_put(&ctx->refs);
return NULL;
}
@@ -654,7 +651,7 @@ static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
{
if (*nr) {
kmem_cache_free_bulk(req_cachep, *nr, reqs);
- io_ring_drop_ctx_refs(ctx, *nr);
+ percpu_ref_put_many(&ctx->refs, *nr);
*nr = 0;
}
}
@@ -663,7 +660,7 @@ static void __io_free_req(struct io_kiocb *req)
{
if (req->file && !(req->flags & REQ_F_FIXED_FILE))
fput(req->file);
- io_ring_drop_ctx_refs(req->ctx, 1);
+ percpu_ref_put(&req->ctx->refs);
kmem_cache_free(req_cachep, req);
}
@@ -919,26 +916,26 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
return ret;
}
-static void kiocb_end_write(struct kiocb *kiocb)
+static void kiocb_end_write(struct io_kiocb *req)
{
- if (kiocb->ki_flags & IOCB_WRITE) {
- struct inode *inode = file_inode(kiocb->ki_filp);
+ /*
+ * Tell lockdep we inherited freeze protection from submission
+ * thread.
+ */
+ if (req->flags & REQ_F_ISREG) {
+ struct inode *inode = file_inode(req->file);
- /*
- * Tell lockdep we inherited freeze protection from submission
- * thread.
- */
- if (S_ISREG(inode->i_mode))
- __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
- file_end_write(kiocb->ki_filp);
+ __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
}
+ file_end_write(req->file);
}
static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
- kiocb_end_write(kiocb);
+ if (kiocb->ki_flags & IOCB_WRITE)
+ kiocb_end_write(req);
if ((req->flags & REQ_F_LINK) && res != req->result)
req->flags |= REQ_F_FAIL_LINK;
@@ -950,7 +947,8 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
- kiocb_end_write(kiocb);
+ if (kiocb->ki_flags & IOCB_WRITE)
+ kiocb_end_write(req);
if ((req->flags & REQ_F_LINK) && res != req->result)
req->flags |= REQ_F_FAIL_LINK;
@@ -1064,8 +1062,17 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
if (!req->file)
return -EBADF;
- if (force_nonblock && !io_file_supports_async(req->file))
- force_nonblock = false;
+ if (S_ISREG(file_inode(req->file)->i_mode))
+ req->flags |= REQ_F_ISREG;
+
+ /*
+ * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
+ * we know to punt it to async context even if it was opened O_NONBLOCK.
+ */
+ if (force_nonblock && !io_file_supports_async(req->file)) {
+ req->flags |= REQ_F_MUST_PUNT;
+ return -EAGAIN;
+ }
kiocb->ki_pos = READ_ONCE(sqe->off);
kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
@@ -1086,7 +1093,8 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
return ret;
/* don't allow async punt if RWF_NOWAIT was requested */
- if (kiocb->ki_flags & IOCB_NOWAIT)
+ if ((kiocb->ki_flags & IOCB_NOWAIT) ||
+ (req->file->f_flags & O_NONBLOCK))
req->flags |= REQ_F_NOWAIT;
if (force_nonblock)
@@ -1387,7 +1395,9 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
* need async punt anyway, so it's more efficient to do it
* here.
*/
- if (force_nonblock && ret2 > 0 && ret2 < read_size)
+ if (force_nonblock && !(req->flags & REQ_F_NOWAIT) &&
+ (req->flags & REQ_F_ISREG) &&
+ ret2 > 0 && ret2 < read_size)
ret2 = -EAGAIN;
/* Catch -EAGAIN return for forced non-blocking submission */
if (!force_nonblock || ret2 != -EAGAIN) {
@@ -1452,7 +1462,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
* released so that it doesn't complain about the held lock when
* we return to userspace.
*/
- if (S_ISREG(file_inode(file)->i_mode)) {
+ if (req->flags & REQ_F_ISREG) {
__sb_start_write(file_inode(file)->i_sb,
SB_FREEZE_WRITE, true);
__sb_writers_release(file_inode(file)->i_sb,
@@ -1889,7 +1899,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- unsigned count, req_dist, tail_index;
+ unsigned count;
struct io_ring_ctx *ctx = req->ctx;
struct list_head *entry;
struct timespec64 ts;
@@ -1912,21 +1922,36 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
count = 1;
req->sequence = ctx->cached_sq_head + count - 1;
+ /* reuse it to store the count */
+ req->submit.sequence = count;
req->flags |= REQ_F_TIMEOUT;
/*
* Insertion sort, ensuring the first entry in the list is always
* the one we need first.
*/
- tail_index = ctx->cached_cq_tail - ctx->rings->sq_dropped;
- req_dist = req->sequence - tail_index;
spin_lock_irq(&ctx->completion_lock);
list_for_each_prev(entry, &ctx->timeout_list) {
struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
- unsigned dist;
+ unsigned nxt_sq_head;
+ long long tmp, tmp_nxt;
- dist = nxt->sequence - tail_index;
- if (req_dist >= dist)
+ /*
+ * Since cached_sq_head + count - 1 can overflow, use type long
+ * long to store it.
+ */
+ tmp = (long long)ctx->cached_sq_head + count - 1;
+ nxt_sq_head = nxt->sequence - nxt->submit.sequence + 1;
+ tmp_nxt = (long long)nxt_sq_head + nxt->submit.sequence - 1;
+
+ /*
+ * cached_sq_head may overflow, and it will never overflow twice
+ * while some timeout request is still valid.
+ */
+ if (ctx->cached_sq_head < nxt_sq_head)
+ tmp += UINT_MAX;
+
+ if (tmp >= tmp_nxt)
break;
}
list_add(&req->list, entry);
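The insertion-sort comparison above has to be wrap-safe: cached_sq_head is a 32-bit counter, so "head + count - 1" can overflow. Below is a standalone sketch of the widening trick, assuming the counter can wrap at most once while both entries are live; seq_sorts_before() is an invented helper name, not from the patch:

#include <limits.h>
#include <stdbool.h>

/*
 * Compare two timeout targets (head + count - 1) on a 32-bit wrapping
 * counter by widening to long long and compensating a single wrap.
 */
static bool seq_sorts_before(unsigned head_a, unsigned count_a,
			     unsigned head_b, unsigned count_b)
{
	long long tmp_a = (long long)head_a + count_a - 1;
	long long tmp_b = (long long)head_b + count_b - 1;

	if (head_a < head_b)	/* a was submitted after the counter wrapped */
		tmp_a += (long long)UINT_MAX + 1;

	return tmp_a < tmp_b;
}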
@@ -2272,7 +2297,13 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
int ret;
ret = __io_submit_sqe(ctx, req, s, force_nonblock);
- if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
+
+ /*
+ * We async punt it if the file wasn't marked NOWAIT, or if the file
+ * doesn't support non-blocking read/write attempts
+ */
+ if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
+ (req->flags & REQ_F_MUST_PUNT))) {
struct io_uring_sqe *sqe_copy;
sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
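The rewritten condition above encodes a two-part rule: an -EAGAIN result is retried in a worker unless the submitter explicitly asked for non-blocking semantics, except that files which cannot do non-blocking I/O at all (REQ_F_MUST_PUNT) are always punted. Condensed into a predicate as a sketch; the helper name and the bit values are illustrative:

#include <errno.h>
#include <stdbool.h>

#define REQ_F_NOWAIT	0x01	/* submitter asked for nonblocking I/O */
#define REQ_F_MUST_PUNT	0x02	/* file can't do nonblocking I/O at all */

static bool should_punt_async(long ret, unsigned flags)
{
	return ret == -EAGAIN &&
	       (!(flags & REQ_F_NOWAIT) || (flags & REQ_F_MUST_PUNT));
}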
@@ -2761,7 +2792,7 @@ out:
if (link)
io_queue_link_head(ctx, link, &link->submit, shadow_req,
- block_for_last);
+ !block_for_last);
if (statep)
io_submit_state_end(statep);
@@ -2920,8 +2951,12 @@ static void io_finish_async(struct io_ring_ctx *ctx)
static void io_destruct_skb(struct sk_buff *skb)
{
struct io_ring_ctx *ctx = skb->sk->sk_user_data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++)
+ if (ctx->sqo_wq[i])
+ flush_workqueue(ctx->sqo_wq[i]);
- io_finish_async(ctx);
unix_destruct_scm(skb);
}
@@ -3630,7 +3665,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
}
}
- io_ring_drop_ctx_refs(ctx, 1);
+ percpu_ref_put(&ctx->refs);
out_fput:
fdput(f);
return submitted ? submitted : ret;
diff --git a/fs/libfs.c b/fs/libfs.c
index c9b2850c0f7c..1463b038ffc4 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -89,58 +89,45 @@ int dcache_dir_close(struct inode *inode, struct file *file)
EXPORT_SYMBOL(dcache_dir_close);
/* parent is locked at least shared */
-static struct dentry *next_positive(struct dentry *parent,
- struct list_head *from,
- int count)
+/*
+ * Returns an element of the siblings' list.
+ * We are looking for the <count>th positive dentry after <p>; if
+ * found, the dentry is grabbed and returned to the caller.
+ * If no such element exists, NULL is returned.
+ */
+static struct dentry *scan_positives(struct dentry *cursor,
+ struct list_head *p,
+ loff_t count,
+ struct dentry *last)
{
- unsigned *seq = &parent->d_inode->i_dir_seq, n;
- struct dentry *res;
- struct list_head *p;
- bool skipped;
- int i;
+ struct dentry *dentry = cursor->d_parent, *found = NULL;
-retry:
- i = count;
- skipped = false;
- n = smp_load_acquire(seq) & ~1;
- res = NULL;
- rcu_read_lock();
- for (p = from->next; p != &parent->d_subdirs; p = p->next) {
+ spin_lock(&dentry->d_lock);
+ while ((p = p->next) != &dentry->d_subdirs) {
struct dentry *d = list_entry(p, struct dentry, d_child);
- if (!simple_positive(d)) {
- skipped = true;
- } else if (!--i) {
- res = d;
- break;
+ // we must at least skip cursors, to avoid livelocks
+ if (d->d_flags & DCACHE_DENTRY_CURSOR)
+ continue;
+ if (simple_positive(d) && !--count) {
+ spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+ if (simple_positive(d))
+ found = dget_dlock(d);
+ spin_unlock(&d->d_lock);
+ if (likely(found))
+ break;
+ count = 1;
+ }
+ if (need_resched()) {
+ list_move(&cursor->d_child, p);
+ p = &cursor->d_child;
+ spin_unlock(&dentry->d_lock);
+ cond_resched();
+ spin_lock(&dentry->d_lock);
}
}
- rcu_read_unlock();
- if (skipped) {
- smp_rmb();
- if (unlikely(*seq != n))
- goto retry;
- }
- return res;
-}
-
-static void move_cursor(struct dentry *cursor, struct list_head *after)
-{
- struct dentry *parent = cursor->d_parent;
- unsigned n, *seq = &parent->d_inode->i_dir_seq;
- spin_lock(&parent->d_lock);
- for (;;) {
- n = *seq;
- if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
- break;
- cpu_relax();
- }
- __list_del(cursor->d_child.prev, cursor->d_child.next);
- if (after)
- list_add(&cursor->d_child, after);
- else
- list_add_tail(&cursor->d_child, &parent->d_subdirs);
- smp_store_release(seq, n + 2);
- spin_unlock(&parent->d_lock);
+ spin_unlock(&dentry->d_lock);
+ dput(last);
+ return found;
}
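The new scan_positives() above walks the sibling list under d_lock and skips other readers' cursor entries, which are list markers rather than real children, so two concurrent readers cannot livelock on each other's cursors. The core filtering step, as a self-contained sketch over a plain linked list (the struct is a toy model, not the dentry layout):

#include <stdbool.h>
#include <stddef.h>

struct node {
	struct node *next;
	bool is_cursor;		/* marker left by a concurrent reader */
	bool positive;		/* positive-dentry analogue: has an inode */
};

/* Find the count-th positive entry after p, ignoring cursor markers. */
static struct node *scan_positives(struct node *p, long count)
{
	for (p = p->next; p; p = p->next) {
		if (p->is_cursor)	/* skip cursors to avoid livelock */
			continue;
		if (p->positive && !--count)
			return p;
	}
	return NULL;
}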
loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
@@ -158,17 +145,25 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
return -EINVAL;
}
if (offset != file->f_pos) {
+ struct dentry *cursor = file->private_data;
+ struct dentry *to = NULL;
+
+ inode_lock_shared(dentry->d_inode);
+
+ if (offset > 2)
+ to = scan_positives(cursor, &dentry->d_subdirs,
+ offset - 2, NULL);
+ spin_lock(&dentry->d_lock);
+ if (to)
+ list_move(&cursor->d_child, &to->d_child);
+ else
+ list_del_init(&cursor->d_child);
+ spin_unlock(&dentry->d_lock);
+ dput(to);
+
file->f_pos = offset;
- if (file->f_pos >= 2) {
- struct dentry *cursor = file->private_data;
- struct dentry *to;
- loff_t n = file->f_pos - 2;
-
- inode_lock_shared(dentry->d_inode);
- to = next_positive(dentry, &dentry->d_subdirs, n);
- move_cursor(cursor, to ? &to->d_child : NULL);
- inode_unlock_shared(dentry->d_inode);
- }
+
+ inode_unlock_shared(dentry->d_inode);
}
return offset;
}
@@ -190,25 +185,35 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
{
struct dentry *dentry = file->f_path.dentry;
struct dentry *cursor = file->private_data;
- struct list_head *p = &cursor->d_child;
- struct dentry *next;
- bool moved = false;
+ struct list_head *anchor = &dentry->d_subdirs;
+ struct dentry *next = NULL;
+ struct list_head *p;
if (!dir_emit_dots(file, ctx))
return 0;
if (ctx->pos == 2)
- p = &dentry->d_subdirs;
- while ((next = next_positive(dentry, p, 1)) != NULL) {
+ p = anchor;
+ else if (!list_empty(&cursor->d_child))
+ p = &cursor->d_child;
+ else
+ return 0;
+
+ while ((next = scan_positives(cursor, p, 1, next)) != NULL) {
if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
d_inode(next)->i_ino, dt_type(d_inode(next))))
break;
- moved = true;
- p = &next->d_child;
ctx->pos++;
+ p = &next->d_child;
}
- if (moved)
- move_cursor(cursor, p);
+ spin_lock(&dentry->d_lock);
+ if (next)
+ list_move_tail(&cursor->d_child, &next->d_child);
+ else
+ list_del_init(&cursor->d_child);
+ spin_unlock(&dentry->d_lock);
+ dput(next);
+
return 0;
}
EXPORT_SYMBOL(dcache_readdir);
@@ -468,8 +473,7 @@ EXPORT_SYMBOL(simple_write_begin);
/**
* simple_write_end - .write_end helper for non-block-device FSes
- * @available: See .write_end of address_space_operations
- * @file: "
+ * @file: See .write_end of address_space_operations
* @mapping: "
* @pos: "
* @len: "
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 222d7115db71..040a50fd9bf3 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -64,13 +64,6 @@
static struct kmem_cache *nfs_direct_cachep;
-/*
- * This represents a set of asynchronous requests that we're waiting on
- */
-struct nfs_direct_mirror {
- ssize_t count;
-};
-
struct nfs_direct_req {
struct kref kref; /* release manager */
@@ -84,9 +77,6 @@ struct nfs_direct_req {
atomic_t io_count; /* i/os we're waiting for */
spinlock_t lock; /* protect completion state */
- struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
- int mirror_count;
-
loff_t io_start; /* Start offset for I/O */
ssize_t count, /* bytes actually processed */
max_count, /* max expected count */
@@ -123,32 +113,42 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
}
static void
-nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
+nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
+ const struct nfs_pgio_header *hdr,
+ ssize_t dreq_len)
{
- int i;
- ssize_t count;
+ if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
+ test_bit(NFS_IOHDR_EOF, &hdr->flags)))
+ return;
+ if (dreq->max_count >= dreq_len) {
+ dreq->max_count = dreq_len;
+ if (dreq->count > dreq_len)
+ dreq->count = dreq_len;
+
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
+ dreq->error = hdr->error;
+ else /* Clear outstanding error if this is EOF */
+ dreq->error = 0;
+ }
+}
- WARN_ON_ONCE(dreq->count >= dreq->max_count);
+static void
+nfs_direct_count_bytes(struct nfs_direct_req *dreq,
+ const struct nfs_pgio_header *hdr)
+{
+ loff_t hdr_end = hdr->io_start + hdr->good_bytes;
+ ssize_t dreq_len = 0;
- if (dreq->mirror_count == 1) {
- dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
- dreq->count += hdr->good_bytes;
- } else {
- /* mirrored writes */
- count = dreq->mirrors[hdr->pgio_mirror_idx].count;
- if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
- count = hdr->io_start + hdr->good_bytes - dreq->io_start;
- dreq->mirrors[hdr->pgio_mirror_idx].count = count;
- }
- /* update the dreq->count by finding the minimum agreed count from all
- * mirrors */
- count = dreq->mirrors[0].count;
+ if (hdr_end > dreq->io_start)
+ dreq_len = hdr_end - dreq->io_start;
- for (i = 1; i < dreq->mirror_count; i++)
- count = min(count, dreq->mirrors[i].count);
+ nfs_direct_handle_truncated(dreq, hdr, dreq_len);
- dreq->count = count;
- }
+ if (dreq_len > dreq->max_count)
+ dreq_len = dreq->max_count;
+
+ if (dreq->count < dreq_len)
+ dreq->count = dreq_len;
}
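nfs_direct_count_bytes() above replaces the per-mirror counters with arithmetic on the header's absolute range: the completed length is however far the header reaches past the request's start offset, clamped to the (possibly truncated) maximum. The same arithmetic in a self-contained sketch; the struct is pared down to just the three fields involved:

#include <sys/types.h>

struct direct_req {
	off_t	io_start;	/* start offset of the whole request */
	ssize_t	count;		/* bytes completed so far */
	ssize_t	max_count;	/* upper bound, shrunk on error or EOF */
};

static void count_bytes(struct direct_req *dreq,
			off_t hdr_io_start, ssize_t hdr_good_bytes)
{
	off_t hdr_end = hdr_io_start + hdr_good_bytes;
	ssize_t dreq_len = 0;

	if (hdr_end > dreq->io_start)
		dreq_len = hdr_end - dreq->io_start;
	if (dreq_len > dreq->max_count)		/* clamp to the maximum */
		dreq_len = dreq->max_count;
	if (dreq->count < dreq_len)	/* completions can arrive out of order */
		dreq->count = dreq_len;
}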
/*
@@ -293,18 +293,6 @@ void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}
-static inline void nfs_direct_setup_mirroring(struct nfs_direct_req *dreq,
- struct nfs_pageio_descriptor *pgio,
- struct nfs_page *req)
-{
- int mirror_count = 1;
-
- if (pgio->pg_ops->pg_get_mirror_count)
- mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
-
- dreq->mirror_count = mirror_count;
-}
-
static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
struct nfs_direct_req *dreq;
@@ -319,7 +307,6 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
INIT_LIST_HEAD(&dreq->mds_cinfo.list);
dreq->verf.committed = NFS_INVALID_STABLE_HOW; /* not set yet */
INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
- dreq->mirror_count = 1;
spin_lock_init(&dreq->lock);
return dreq;
@@ -402,20 +389,12 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
struct nfs_direct_req *dreq = hdr->dreq;
spin_lock(&dreq->lock);
- if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
- dreq->error = hdr->error;
-
if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
spin_unlock(&dreq->lock);
goto out_put;
}
- if (hdr->good_bytes != 0)
- nfs_direct_good_bytes(dreq, hdr);
-
- if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
- dreq->error = 0;
-
+ nfs_direct_count_bytes(dreq, hdr);
spin_unlock(&dreq->lock);
while (!list_empty(&hdr->pages)) {
@@ -646,29 +625,22 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
LIST_HEAD(reqs);
struct nfs_commit_info cinfo;
LIST_HEAD(failed);
- int i;
nfs_init_cinfo_from_dreq(&cinfo, dreq);
nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
dreq->count = 0;
+ dreq->max_count = 0;
+ list_for_each_entry(req, &reqs, wb_list)
+ dreq->max_count += req->wb_bytes;
dreq->verf.committed = NFS_INVALID_STABLE_HOW;
nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
- for (i = 0; i < dreq->mirror_count; i++)
- dreq->mirrors[i].count = 0;
get_dreq(dreq);
nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
&nfs_direct_write_completion_ops);
desc.pg_dreq = dreq;
- req = nfs_list_entry(reqs.next);
- nfs_direct_setup_mirroring(dreq, &desc, req);
- if (desc.pg_error < 0) {
- list_splice_init(&reqs, &failed);
- goto out_failed;
- }
-
list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
/* Bump the transmission count */
req->wb_nio++;
@@ -686,7 +658,6 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
}
nfs_pageio_complete(&desc);
-out_failed:
while (!list_empty(&failed)) {
req = nfs_list_entry(failed.next);
nfs_list_remove_request(req);
@@ -791,17 +762,13 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
nfs_init_cinfo_from_dreq(&cinfo, dreq);
spin_lock(&dreq->lock);
-
- if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
- dreq->error = hdr->error;
-
if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
spin_unlock(&dreq->lock);
goto out_put;
}
+ nfs_direct_count_bytes(dreq, hdr);
if (hdr->good_bytes != 0) {
- nfs_direct_good_bytes(dreq, hdr);
if (nfs_write_need_commit(hdr)) {
if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
request_commit = true;
@@ -923,7 +890,6 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
break;
}
- nfs_direct_setup_mirroring(dreq, &desc, req);
if (desc.pg_error < 0) {
nfs_free_request(req);
result = desc.pg_error;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 11eafcfc490b..ab8ca20fd579 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -6106,6 +6106,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
status = nfs4_call_sync_custom(&task_setup_data);
if (setclientid.sc_cred) {
+ kfree(clp->cl_acceptor);
clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
put_rpccred(setclientid.sc_cred);
}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 85ca49549b39..52cab65f91cf 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -786,7 +786,6 @@ static void nfs_inode_remove_request(struct nfs_page *req)
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_page *head;
- atomic_long_dec(&nfsi->nrequests);
if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
head = req->wb_head;
@@ -799,8 +798,10 @@ static void nfs_inode_remove_request(struct nfs_page *req)
spin_unlock(&mapping->private_lock);
}
- if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
+ if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
nfs_release_request(req);
+ atomic_long_dec(&nfsi->nrequests);
+ }
}
static void
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 8de1c9d644f6..9cd0a6815933 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -2049,7 +2049,8 @@ out_write_size:
inode->i_mtime = inode->i_ctime = current_time(inode);
di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
- ocfs2_update_inode_fsync_trans(handle, inode, 1);
+ if (handle)
+ ocfs2_update_inode_fsync_trans(handle, inode, 1);
}
if (handle)
ocfs2_journal_dirty(handle, wc->w_di_bh);
@@ -2146,13 +2147,30 @@ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
struct ocfs2_dio_write_ctxt *dwc = NULL;
struct buffer_head *di_bh = NULL;
u64 p_blkno;
- loff_t pos = iblock << inode->i_sb->s_blocksize_bits;
+ unsigned int i_blkbits = inode->i_sb->s_blocksize_bits;
+ loff_t pos = iblock << i_blkbits;
+ sector_t endblk = (i_size_read(inode) - 1) >> i_blkbits;
unsigned len, total_len = bh_result->b_size;
int ret = 0, first_get_block = 0;
len = osb->s_clustersize - (pos & (osb->s_clustersize - 1));
len = min(total_len, len);
+ /*
+ * bh_result->b_size is counted in get_more_blocks according to the write
+ * "pos" and "end"; we may need to map twice to return different buffer
+ * states:
+ * 1. the area within i_size: do not set NEW;
+ * 2. the area beyond i_size: set NEW.
+ *
+ * iblock endblk
+ * |--------|---------|---------|---------
+ * |<-------area in file------->|
+ */
+
+ if ((iblock <= endblk) &&
+ ((iblock + ((len - 1) >> i_blkbits)) > endblk))
+ len = (endblk - iblock + 1) << i_blkbits;
+
mlog(0, "get block of %lu at %llu:%u req %u\n",
inode->i_ino, pos, len, total_len);
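The trimming above makes ocfs2_dio_wr_get_block() stop a mapping at the last in-file block, so ranges inside and beyond i_size get mapped by separate calls with different NEW state. The block arithmetic, extracted into a standalone sketch with an invented helper name:

#include <stdint.h>

/*
 * Trim a byte length so the block range starting at iblock does not
 * cross endblk, the last block covered by i_size; blkbits is
 * log2(block size).
 */
static unsigned int trim_to_eof(uint64_t iblock, unsigned int len,
				uint64_t endblk, unsigned int blkbits)
{
	uint64_t last = iblock + ((len - 1) >> blkbits);

	if (iblock <= endblk && last > endblk)
		len = (unsigned int)((endblk - iblock + 1) << blkbits);
	return len;
}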
@@ -2236,6 +2254,9 @@ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
if (desc->c_needs_zero)
set_buffer_new(bh_result);
+ if (iblock > endblk)
+ set_buffer_new(bh_result);
+
/* May sleep in end_io. It should not happen in an irq context. So defer
* it to dio work queue. */
set_buffer_defer_completion(bh_result);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 2e982db3e1ae..53939bf9d7d2 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1230,6 +1230,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
if (IS_ERR(transfer_to[USRQUOTA])) {
status = PTR_ERR(transfer_to[USRQUOTA]);
+ transfer_to[USRQUOTA] = NULL;
goto bail_unlock;
}
}
@@ -1239,6 +1240,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
if (IS_ERR(transfer_to[GRPQUOTA])) {
status = PTR_ERR(transfer_to[GRPQUOTA]);
+ transfer_to[GRPQUOTA] = NULL;
goto bail_unlock;
}
}
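Both ocfs2_setattr() hunks above fix the same hazard: after PTR_ERR() the array slot still holds an error-encoded pointer, and the common exit path would then pass it to dqput(). Resetting the slot to NULL keeps the cleanup loop safe. A compact sketch of the pattern; IS_ERR()/PTR_ERR() are re-implemented minimally and put_ref() is a stand-in release:

#include <stdint.h>

#define MAX_ERRNO	4095
#define IS_ERR(p)	((uintptr_t)(p) >= (uintptr_t)-MAX_ERRNO)
#define PTR_ERR(p)	((long)(intptr_t)(p))

static void put_ref(void *p) { (void)p; }	/* stand-in release */

static int assign_two(void *slot[2], void *a, void *b)
{
	int status = 0;

	slot[0] = a;
	if (IS_ERR(slot[0])) {
		status = (int)PTR_ERR(slot[0]);
		slot[0] = NULL;		/* never hand an ERR_PTR to cleanup */
		goto bail;
	}
	slot[1] = b;
	if (IS_ERR(slot[1])) {
		status = (int)PTR_ERR(slot[1]);
		slot[1] = NULL;
		goto bail;
	}
bail:
	for (int i = 0; i < 2; i++)
		if (slot[i])
			put_ref(slot[i]);	/* sees only NULL or valid refs */
	return status;
}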
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index d6f7b299eb23..efeea208fdeb 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -283,7 +283,7 @@ static int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb,
if (inode_alloc)
inode_lock(inode_alloc);
- if (o2info_coherent(&fi->ifi_req)) {
+ if (inode_alloc && o2info_coherent(&fi->ifi_req)) {
status = ocfs2_inode_lock(inode_alloc, &bh, 0);
if (status < 0) {
mlog_errno(status);
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 930e3d388579..699a560efbb0 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -217,7 +217,8 @@ void ocfs2_recovery_exit(struct ocfs2_super *osb)
/* At this point, we know that no more recovery threads can be
* launched, so wait for any recovery completion work to
* complete. */
- flush_workqueue(osb->ocfs2_wq);
+ if (osb->ocfs2_wq)
+ flush_workqueue(osb->ocfs2_wq);
/*
* Now that recovery is shut down, and the osb is about to be
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 158e5af767fd..720e9f94957e 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -377,7 +377,8 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
struct ocfs2_dinode *alloc = NULL;
cancel_delayed_work(&osb->la_enable_wq);
- flush_workqueue(osb->ocfs2_wq);
+ if (osb->ocfs2_wq)
+ flush_workqueue(osb->ocfs2_wq);
if (osb->local_alloc_state == OCFS2_LA_UNUSED)
goto out;
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 90c830e3758e..d8507972ee13 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -1490,18 +1490,6 @@ static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
return loc->xl_ops->xlo_check_space(loc, xi);
}
-static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
-{
- loc->xl_ops->xlo_add_entry(loc, name_hash);
- loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
- /*
- * We can't leave the new entry's xe_name_offset at zero or
- * add_namevalue() will go nuts. We set it to the size of our
- * storage so that it can never be less than any other entry.
- */
- loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
-}
-
static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_info *xi)
{
@@ -2133,29 +2121,31 @@ static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
if (rc)
goto out;
- if (loc->xl_entry) {
- if (ocfs2_xa_can_reuse_entry(loc, xi)) {
- orig_value_size = loc->xl_entry->xe_value_size;
- rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
- if (rc)
- goto out;
- goto alloc_value;
- }
+ if (!loc->xl_entry) {
+ rc = -EINVAL;
+ goto out;
+ }
- if (!ocfs2_xattr_is_local(loc->xl_entry)) {
- orig_clusters = ocfs2_xa_value_clusters(loc);
- rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
- if (rc) {
- mlog_errno(rc);
- ocfs2_xa_cleanup_value_truncate(loc,
- "overwriting",
- orig_clusters);
- goto out;
- }
+ if (ocfs2_xa_can_reuse_entry(loc, xi)) {
+ orig_value_size = loc->xl_entry->xe_value_size;
+ rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
+ if (rc)
+ goto out;
+ goto alloc_value;
+ }
+
+ if (!ocfs2_xattr_is_local(loc->xl_entry)) {
+ orig_clusters = ocfs2_xa_value_clusters(loc);
+ rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
+ if (rc) {
+ mlog_errno(rc);
+ ocfs2_xa_cleanup_value_truncate(loc,
+ "overwriting",
+ orig_clusters);
+ goto out;
}
- ocfs2_xa_wipe_namevalue(loc);
- } else
- ocfs2_xa_add_entry(loc, name_hash);
+ }
+ ocfs2_xa_wipe_namevalue(loc);
/*
* If we get here, we have a blank entry. Fill it. We grow our
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index ac9247371871..8c1f1bb1a5ce 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -132,9 +132,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
global_node_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR);
show_val_kb(m, "ShmemPmdMapped: ",
global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR);
- show_val_kb(m, "FileHugePages: ",
+ show_val_kb(m, "FileHugePages: ",
global_node_page_state(NR_FILE_THPS) * HPAGE_PMD_NR);
- show_val_kb(m, "FilePmdMapped: ",
+ show_val_kb(m, "FilePmdMapped: ",
global_node_page_state(NR_FILE_PMDMAPPED) * HPAGE_PMD_NR);
#endif
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 544d1ee15aee..7c952ee732e6 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -42,10 +42,12 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
return -EINVAL;
while (count > 0) {
- if (pfn_valid(pfn))
- ppage = pfn_to_page(pfn);
- else
- ppage = NULL;
+ /*
+ * TODO: ZONE_DEVICE support requires identifying
+ * memmaps that were actually initialized.
+ */
+ ppage = pfn_to_online_page(pfn);
+
if (!ppage || PageSlab(ppage) || page_has_type(ppage))
pcount = 0;
else
@@ -216,10 +218,11 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
return -EINVAL;
while (count > 0) {
- if (pfn_valid(pfn))
- ppage = pfn_to_page(pfn);
- else
- ppage = NULL;
+ /*
+ * TODO: ZONE_DEVICE support requires identifying
+ * memmaps that were actually initialized.
+ */
+ ppage = pfn_to_online_page(pfn);
if (put_user(stable_page_flags(ppage), out)) {
ret = -EFAULT;
@@ -261,10 +264,11 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
return -EINVAL;
while (count > 0) {
- if (pfn_valid(pfn))
- ppage = pfn_to_page(pfn);
- else
- ppage = NULL;
+ /*
+ * TODO: ZONE_DEVICE support requires identifying
+ * memmaps that were actually initialized.
+ */
+ ppage = pfn_to_online_page(pfn);
if (ppage)
ino = page_cgroup_ino(ppage);
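All three loops above switch from pfn_valid() + pfn_to_page() to pfn_to_online_page(), which additionally refuses pfns whose memmap may never have been initialized (offline sections and, eventually, ZONE_DEVICE). A toy model of the stricter lookup; the array stands in for the memmap and none of this is the kernel implementation:

#include <stdbool.h>
#include <stddef.h>

struct page { bool online; };		/* toy page descriptor */

static struct page pages[1024];		/* toy "memmap" */

static bool pfn_valid(unsigned long pfn)
{
	return pfn < 1024;		/* a descriptor exists... */
}

static struct page *pfn_to_online_page(unsigned long pfn)
{
	if (!pfn_valid(pfn))
		return NULL;
	/* ...but only online pages have initialized metadata */
	return pages[pfn].online ? &pages[pfn] : NULL;
}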
diff --git a/fs/readdir.c b/fs/readdir.c
index 19bea591c3f1..d26d5ea4de7b 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -27,53 +27,13 @@
/*
* Note the "unsafe_put_user()" semantics: we goto a
* label for errors.
- *
- * Also note how we use a "while()" loop here, even though
- * only the biggest size needs to loop. The compiler (well,
- * at least gcc) is smart enough to turn the smaller sizes
- * into just if-statements, and this way we don't need to
- * care whether 'u64' or 'u32' is the biggest size.
- */
-#define unsafe_copy_loop(dst, src, len, type, label) \
- while (len >= sizeof(type)) { \
- unsafe_put_user(get_unaligned((type *)src), \
- (type __user *)dst, label); \
- dst += sizeof(type); \
- src += sizeof(type); \
- len -= sizeof(type); \
- }
-
-/*
- * We avoid doing 64-bit copies on 32-bit architectures. They
- * might be better, but the component names are mostly small,
- * and the 64-bit cases can end up being much more complex and
- * put much more register pressure on the code, so it's likely
- * not worth the pain of unaligned accesses etc.
- *
- * So limit the copies to "unsigned long" size. I did verify
- * that at least the x86-32 case is ok without this limiting,
- * but I worry about random other legacy 32-bit cases that
- * might not do as well.
- */
-#define unsafe_copy_type(dst, src, len, type, label) do { \
- if (sizeof(type) <= sizeof(unsigned long)) \
- unsafe_copy_loop(dst, src, len, type, label); \
-} while (0)
-
-/*
- * Copy the dirent name to user space, and NUL-terminate
- * it. This should not be a function call, since we're doing
- * the copy inside a "user_access_begin/end()" section.
*/
#define unsafe_copy_dirent_name(_dst, _src, _len, label) do { \
char __user *dst = (_dst); \
const char *src = (_src); \
size_t len = (_len); \
- unsafe_copy_type(dst, src, len, u64, label); \
- unsafe_copy_type(dst, src, len, u32, label); \
- unsafe_copy_type(dst, src, len, u16, label); \
- unsafe_copy_type(dst, src, len, u8, label); \
- unsafe_put_user(0, dst, label); \
+ unsafe_put_user(0, dst+len, label); \
+ unsafe_copy_to_user(dst, src, len, label); \
} while (0)
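The simplified macro above leans on unsafe_copy_to_user() and keeps one subtle ordering: the terminating NUL is stored at dst+len before the bulk copy, so a fault at the far end of the buffer aborts before any name bytes land. The shape of that ordering in plain C; here faults are simply the caller's problem, whereas the kernel version jumps to the error label:

#include <string.h>

static void copy_dirent_name(char *dst, const char *src, size_t len)
{
	dst[len] = '\0';	/* touch the last byte first */
	memcpy(dst, src, len);	/* then copy the name itself */
}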
@@ -145,9 +105,9 @@ EXPORT_SYMBOL(iterate_dir);
*/
static int verify_dirent_name(const char *name, int len)
{
- if (WARN_ON_ONCE(!len))
+ if (!len)
return -EIO;
- if (WARN_ON_ONCE(memchr(name, '/', len)))
+ if (memchr(name, '/', len))
return -EIO;
return 0;
}
diff --git a/fs/super.c b/fs/super.c
index f627b7c53d2b..cfadab2cbf35 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1300,6 +1300,7 @@ int get_tree_bdev(struct fs_context *fc,
mutex_lock(&bdev->bd_fsfreeze_mutex);
if (bdev->bd_fsfreeze_count > 0) {
mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ blkdev_put(bdev, mode);
warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
return -EBUSY;
}
@@ -1308,8 +1309,10 @@ int get_tree_bdev(struct fs_context *fc,
fc->sget_key = bdev;
s = sget_fc(fc, test_bdev_super_fc, set_bdev_super_fc);
mutex_unlock(&bdev->bd_fsfreeze_mutex);
- if (IS_ERR(s))
+ if (IS_ERR(s)) {
+ blkdev_put(bdev, mode);
return PTR_ERR(s);
+ }
if (s->s_root) {
/* Don't summarily change the RO/RW state. */
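Both get_tree_bdev() hunks above plug the same leak: once the block device has been grabbed, every early return (the frozen-device check and the sget_fc() failure alike) must be paired with blkdev_put(). The acquire/release shape, sketched with invented stand-ins for the device calls:

#include <errno.h>
#include <stddef.h>

struct dev;
struct dev *open_dev(const char *name);		/* assumed: NULL on failure */
void close_dev(struct dev *d);
int do_mount(struct dev *d);			/* assumed: 0 or -errno */

static int get_tree_like(const char *name)
{
	struct dev *d = open_dev(name);
	int err;

	if (!d)
		return -ENOENT;

	err = do_mount(d);
	if (err) {
		close_dev(d);	/* every failure past open must release */
		return err;
	}
	return 0;		/* success: the mount now owns the device */
}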
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 9fc14e38927f..0caa151cae4e 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -16,11 +16,11 @@
#include <linux/namei.h>
#include <linux/tracefs.h>
#include <linux/fsnotify.h>
+#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/parser.h>
#include <linux/magic.h>
#include <linux/slab.h>
-#include <linux/security.h>
#define TRACEFS_DEFAULT_MODE 0700
@@ -28,25 +28,6 @@ static struct vfsmount *tracefs_mount;
static int tracefs_mount_count;
static bool tracefs_registered;
-static int default_open_file(struct inode *inode, struct file *filp)
-{
- struct dentry *dentry = filp->f_path.dentry;
- struct file_operations *real_fops;
- int ret;
-
- if (!dentry)
- return -EINVAL;
-
- ret = security_locked_down(LOCKDOWN_TRACEFS);
- if (ret)
- return ret;
-
- real_fops = dentry->d_fsdata;
- if (!real_fops->open)
- return 0;
- return real_fops->open(inode, filp);
-}
-
static ssize_t default_read_file(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -241,12 +222,6 @@ static int tracefs_apply_options(struct super_block *sb)
return 0;
}
-static void tracefs_destroy_inode(struct inode *inode)
-{
- if (S_ISREG(inode->i_mode))
- kfree(inode->i_fop);
-}
-
static int tracefs_remount(struct super_block *sb, int *flags, char *data)
{
int err;
@@ -283,7 +258,6 @@ static int tracefs_show_options(struct seq_file *m, struct dentry *root)
static const struct super_operations tracefs_super_operations = {
.statfs = simple_statfs,
.remount_fs = tracefs_remount,
- .destroy_inode = tracefs_destroy_inode,
.show_options = tracefs_show_options,
};
@@ -414,10 +388,12 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops)
{
- struct file_operations *proxy_fops;
struct dentry *dentry;
struct inode *inode;
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return NULL;
+
if (!(mode & S_IFMT))
mode |= S_IFREG;
BUG_ON(!S_ISREG(mode));
@@ -430,20 +406,8 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
if (unlikely(!inode))
return failed_creating(dentry);
- proxy_fops = kzalloc(sizeof(struct file_operations), GFP_KERNEL);
- if (unlikely(!proxy_fops)) {
- iput(inode);
- return failed_creating(dentry);
- }
-
- if (!fops)
- fops = &tracefs_file_operations;
-
- dentry->d_fsdata = (void *)fops;
- memcpy(proxy_fops, fops, sizeof(*proxy_fops));
- proxy_fops->open = default_open_file;
inode->i_mode = mode;
- inode->i_fop = proxy_fops;
+ inode->i_fop = fops ? fops : &tracefs_file_operations;
inode->i_private = data;
d_instantiate(dentry, inode);
fsnotify_create(dentry->d_parent->d_inode, dentry);
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index 5de296b34ab1..14fbdf22b7e7 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -28,12 +28,11 @@ xfs_get_aghdr_buf(
struct xfs_mount *mp,
xfs_daddr_t blkno,
size_t numblks,
- int flags,
const struct xfs_buf_ops *ops)
{
struct xfs_buf *bp;
- bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, flags);
+ bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0);
if (!bp)
return NULL;
@@ -345,7 +344,7 @@ xfs_ag_init_hdr(
{
struct xfs_buf *bp;
- bp = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, 0, ops);
+ bp = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, ops);
if (!bp)
return -ENOMEM;
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index b9f019603d0b..f0089e862216 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -826,32 +826,17 @@ xfs_attr_shortform_to_leaf(
sf = (xfs_attr_shortform_t *)tmpbuffer;
xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
- xfs_bmap_local_to_extents_empty(dp, XFS_ATTR_FORK);
+ xfs_bmap_local_to_extents_empty(args->trans, dp, XFS_ATTR_FORK);
bp = NULL;
error = xfs_da_grow_inode(args, &blkno);
- if (error) {
- /*
- * If we hit an IO error middle of the transaction inside
- * grow_inode(), we may have inconsistent data. Bail out.
- */
- if (error == -EIO)
- goto out;
- xfs_idata_realloc(dp, size, XFS_ATTR_FORK); /* try to put */
- memcpy(ifp->if_u1.if_data, tmpbuffer, size); /* it back */
+ if (error)
goto out;
- }
ASSERT(blkno == 0);
error = xfs_attr3_leaf_create(args, blkno, &bp);
- if (error) {
- /* xfs_attr3_leaf_create may not have instantiated a block */
- if (bp && (xfs_da_shrink_inode(args, 0, bp) != 0))
- goto out;
- xfs_idata_realloc(dp, size, XFS_ATTR_FORK); /* try to put */
- memcpy(ifp->if_u1.if_data, tmpbuffer, size); /* it back */
+ if (error)
goto out;
- }
memset((char *)&nargs, 0, sizeof(nargs));
nargs.dp = dp;
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 4edc25a2ba80..02469d59c787 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -792,6 +792,7 @@ out_root_realloc:
*/
void
xfs_bmap_local_to_extents_empty(
+ struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork)
{
@@ -808,6 +809,7 @@ xfs_bmap_local_to_extents_empty(
ifp->if_u1.if_root = NULL;
ifp->if_height = 0;
XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
@@ -840,7 +842,7 @@ xfs_bmap_local_to_extents(
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
if (!ifp->if_bytes) {
- xfs_bmap_local_to_extents_empty(ip, whichfork);
+ xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
flags = XFS_ILOG_CORE;
goto done;
}
@@ -887,7 +889,7 @@ xfs_bmap_local_to_extents(
/* account for the change in fork size */
xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
- xfs_bmap_local_to_extents_empty(ip, whichfork);
+ xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
flags |= XFS_ILOG_CORE;
ifp->if_u1.if_root = NULL;
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 5bb446d80542..e2798c6f3a5f 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -182,7 +182,8 @@ void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
xfs_filblks_t len);
int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
int xfs_bmap_set_attrforkoff(struct xfs_inode *ip, int size, int *version);
-void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
+void xfs_bmap_local_to_extents_empty(struct xfs_trans *tp,
+ struct xfs_inode *ip, int whichfork);
void __xfs_bmap_add_free(struct xfs_trans *tp, xfs_fsblock_t bno,
xfs_filblks_t len, const struct xfs_owner_info *oinfo,
bool skip_discard);
diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c
index 9595ced393dc..49e4bc39e7bb 100644
--- a/fs/xfs/libxfs/xfs_dir2_block.c
+++ b/fs/xfs/libxfs/xfs_dir2_block.c
@@ -1096,7 +1096,7 @@ xfs_dir2_sf_to_block(
memcpy(sfp, oldsfp, ifp->if_bytes);
xfs_idata_realloc(dp, -ifp->if_bytes, XFS_DATA_FORK);
- xfs_bmap_local_to_extents_empty(dp, XFS_DATA_FORK);
+ xfs_bmap_local_to_extents_empty(tp, dp, XFS_DATA_FORK);
dp->i_d.di_size = 0;
/*
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index 39dd2b908106..e9371a8e0e26 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -366,11 +366,11 @@ struct xfs_bulkstat {
uint64_t bs_blocks; /* number of blocks */
uint64_t bs_xflags; /* extended flags */
- uint64_t bs_atime; /* access time, seconds */
- uint64_t bs_mtime; /* modify time, seconds */
+ int64_t bs_atime; /* access time, seconds */
+ int64_t bs_mtime; /* modify time, seconds */
- uint64_t bs_ctime; /* inode change time, seconds */
- uint64_t bs_btime; /* creation time, seconds */
+ int64_t bs_ctime; /* inode change time, seconds */
+ int64_t bs_btime; /* creation time, seconds */
uint32_t bs_gen; /* generation count */
uint32_t bs_uid; /* user id */
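The field-type change above matters for pre-1970 timestamps: a negative time value stored in a uint64_t round-trips to userspace as a huge positive number. A ten-line demonstration, runnable as-is:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t bs_atime = -86400;	/* one day before the 1970 epoch */
	uint64_t as_unsigned = (uint64_t)bs_atime;

	printf("signed:   %lld\n", (long long)bs_atime);
	printf("unsigned: %llu\n", (unsigned long long)as_unsigned);
	/* the unsigned view is 18446744073709465216, nonsense to userspace */
	return 0;
}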
diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c
index 93b3793bc5b3..0cab11a5d390 100644
--- a/fs/xfs/scrub/refcount.c
+++ b/fs/xfs/scrub/refcount.c
@@ -341,7 +341,6 @@ xchk_refcountbt_rec(
xfs_extlen_t len;
xfs_nlink_t refcount;
bool has_cowflag;
- int error = 0;
bno = be32_to_cpu(rec->refc.rc_startblock);
len = be32_to_cpu(rec->refc.rc_blockcount);
@@ -366,7 +365,7 @@ xchk_refcountbt_rec(
xchk_refcountbt_xref(bs->sc, bno, len, refcount);
- return error;
+ return 0;
}
/* Make sure we have as many refc blocks as the rmap says. */
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 0910cb75b65d..4f443703065e 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -864,6 +864,7 @@ xfs_alloc_file_space(
xfs_filblks_t allocatesize_fsb;
xfs_extlen_t extsz, temp;
xfs_fileoff_t startoffset_fsb;
+ xfs_fileoff_t endoffset_fsb;
int nimaps;
int quota_flag;
int rt;
@@ -891,7 +892,8 @@ xfs_alloc_file_space(
imapp = &imaps[0];
nimaps = 1;
startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
- allocatesize_fsb = XFS_B_TO_FSB(mp, count);
+ endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
+ allocatesize_fsb = endoffset_fsb - startoffset_fsb;
/*
* Allocate file space until done or until there is an error
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 21c243622a79..0abba171aa89 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -345,6 +345,15 @@ xfs_buf_allocate_memory(
unsigned short page_count, i;
xfs_off_t start, end;
int error;
+ xfs_km_flags_t kmflag_mask = 0;
+
+ /*
+ * ensure a zeroed buffer for non-read cases.
+ */
+ if (!(flags & XBF_READ)) {
+ kmflag_mask |= KM_ZERO;
+ gfp_mask |= __GFP_ZERO;
+ }
/*
* for buffers that are contained within a single page, just allocate
@@ -354,7 +363,8 @@ xfs_buf_allocate_memory(
size = BBTOB(bp->b_length);
if (size < PAGE_SIZE) {
int align_mask = xfs_buftarg_dma_alignment(bp->b_target);
- bp->b_addr = kmem_alloc_io(size, align_mask, KM_NOFS);
+ bp->b_addr = kmem_alloc_io(size, align_mask,
+ KM_NOFS | kmflag_mask);
if (!bp->b_addr) {
/* low memory - use alloc_page loop instead */
goto use_alloc_page;
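The hunk above ORs in KM_ZERO/__GFP_ZERO whenever a buffer will not be filled by a read, so stale heap contents can never reach disk. The userspace analogue of the same decision is choosing calloc() over malloc() for write-out buffers, sketched here:

#include <stdlib.h>

/*
 * Read buffers get overwritten by the read anyway; buffers that will be
 * written out must start zeroed so stale memory never hits the media.
 */
static void *alloc_io_buffer(size_t size, int will_be_read_into)
{
	return will_be_read_into ? malloc(size) : calloc(1, size);
}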
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index a2beee9f74da..641d07f30a27 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1443,7 +1443,7 @@ xlog_alloc_log(
prev_iclog = iclog;
iclog->ic_data = kmem_alloc_io(log->l_iclog_size, align_mask,
- KM_MAYFAIL);
+ KM_MAYFAIL | KM_ZERO);
if (!iclog->ic_data)
goto out_free_iclog;
#ifdef DEBUG
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 508319039dce..c1a514ffff55 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -127,7 +127,7 @@ xlog_alloc_buffer(
if (nbblks > 1 && log->l_sectBBsize > 1)
nbblks += log->l_sectBBsize;
nbblks = round_up(nbblks, log->l_sectBBsize);
- return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL);
+ return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
}
/*
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index 296aab724677..b1230e33d506 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -27,34 +27,36 @@
*/
enum amd_asic_type {
CHIP_TAHITI = 0,
- CHIP_PITCAIRN,
- CHIP_VERDE,
- CHIP_OLAND,
- CHIP_HAINAN,
- CHIP_BONAIRE,
- CHIP_KAVERI,
- CHIP_KABINI,
- CHIP_HAWAII,
- CHIP_MULLINS,
- CHIP_TOPAZ,
- CHIP_TONGA,
- CHIP_FIJI,
- CHIP_CARRIZO,
- CHIP_STONEY,
- CHIP_POLARIS10,
- CHIP_POLARIS11,
- CHIP_POLARIS12,
- CHIP_VEGAM,
- CHIP_VEGA10,
- CHIP_VEGA12,
- CHIP_VEGA20,
- CHIP_RAVEN,
- CHIP_ARCTURUS,
- CHIP_RENOIR,
- CHIP_NAVI10,
- CHIP_NAVI14,
- CHIP_NAVI12,
+ CHIP_PITCAIRN, /* 1 */
+ CHIP_VERDE, /* 2 */
+ CHIP_OLAND, /* 3 */
+ CHIP_HAINAN, /* 4 */
+ CHIP_BONAIRE, /* 5 */
+ CHIP_KAVERI, /* 6 */
+ CHIP_KABINI, /* 7 */
+ CHIP_HAWAII, /* 8 */
+ CHIP_MULLINS, /* 9 */
+ CHIP_TOPAZ, /* 10 */
+ CHIP_TONGA, /* 11 */
+ CHIP_FIJI, /* 12 */
+ CHIP_CARRIZO, /* 13 */
+ CHIP_STONEY, /* 14 */
+ CHIP_POLARIS10, /* 15 */
+ CHIP_POLARIS11, /* 16 */
+ CHIP_POLARIS12, /* 17 */
+ CHIP_VEGAM, /* 18 */
+ CHIP_VEGA10, /* 19 */
+ CHIP_VEGA12, /* 20 */
+ CHIP_VEGA20, /* 21 */
+ CHIP_RAVEN, /* 22 */
+ CHIP_ARCTURUS, /* 23 */
+ CHIP_RENOIR, /* 24 */
+ CHIP_NAVI10, /* 25 */
+ CHIP_NAVI14, /* 26 */
+ CHIP_NAVI12, /* 27 */
CHIP_LAST,
};
+extern const char *amdgpu_asic_name[];
+
#endif /*__AMD_ASIC_TYPE_H__ */
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index 4b3e863c4f8a..fbf3812c4326 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -126,6 +126,7 @@ struct dw_hdmi_plat_data {
const struct drm_display_mode *mode);
unsigned long input_bus_format;
unsigned long input_bus_encoding;
+ bool use_drm_infoframe;
/* Vendor PHY support */
const struct dw_hdmi_phy_ops *phy_ops;
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
deleted file mode 100644
index 037b1f7a87a5..000000000000
--- a/include/drm/drmP.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Internal Header for the Direct Rendering Manager
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009-2010, Code Aurora Forum.
- * All rights reserved.
- *
- * Author: Rickard E. (Rik) Faith <faith@valinux.com>
- * Author: Gareth Hughes <gareth@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _DRM_P_H_
-#define _DRM_P_H_
-
-#include <linux/agp_backend.h>
-#include <linux/cdev.h>
-#include <linux/dma-mapping.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/highmem.h>
-#include <linux/idr.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/kref.h>
-#include <linux/miscdevice.h>
-#include <linux/mm.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-#include <linux/poll.h>
-#include <linux/ratelimit.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-#include <linux/workqueue.h>
-#include <linux/dma-fence.h>
-#include <linux/module.h>
-#include <linux/mman.h>
-#include <asm/pgalloc.h>
-#include <linux/uaccess.h>
-
-#include <uapi/drm/drm.h>
-#include <uapi/drm/drm_mode.h>
-
-#include <drm/drm_agpsupport.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_hashtab.h>
-#include <drm/drm_mm.h>
-#include <drm/drm_os_linux.h>
-#include <drm/drm_sarea.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_prime.h>
-#include <drm/drm_print.h>
-#include <drm/drm_pci.h>
-#include <drm/drm_file.h>
-#include <drm/drm_debugfs.h>
-#include <drm/drm_ioctl.h>
-#include <drm/drm_sysfs.h>
-#include <drm/drm_vblank.h>
-#include <drm/drm_irq.h>
-#include <drm/drm_device.h>
-
-struct module;
-
-struct device_node;
-struct videomode;
-struct dma_resv;
-struct dma_buf_attachment;
-
-struct pci_dev;
-struct pci_controller;
-
-/*
- * NOTE: drmP.h is obsolete - do NOT add anything to this file
- *
- * Do not include drmP.h in new files.
- * Work is ongoing to remove drmP.h includes from existing files
- */
-
-#endif
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index ed1a985745ba..51ecb5112ef8 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -23,9 +23,9 @@
#ifndef _DRM_DP_HELPER_H_
#define _DRM_DP_HELPER_H_
-#include <linux/types.h>
-#include <linux/i2c.h>
#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
/*
* Unless otherwise noted, all values are from the DP 1.1a spec. Note that
@@ -137,6 +137,7 @@
# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */
#define DP_MAIN_LINK_CHANNEL_CODING 0x006
+# define DP_CAP_ANSI_8B10B (1 << 0)
#define DP_DOWN_STREAM_PORT_COUNT 0x007
# define DP_PORT_COUNT_MASK 0x0f
@@ -604,6 +605,14 @@
# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
#define DP_ADJUST_REQUEST_POST_CURSOR2 0x20c
+# define DP_ADJUST_POST_CURSOR2_LANE0_MASK 0x03
+# define DP_ADJUST_POST_CURSOR2_LANE0_SHIFT 0
+# define DP_ADJUST_POST_CURSOR2_LANE1_MASK 0x0c
+# define DP_ADJUST_POST_CURSOR2_LANE1_SHIFT 2
+# define DP_ADJUST_POST_CURSOR2_LANE2_MASK 0x30
+# define DP_ADJUST_POST_CURSOR2_LANE2_SHIFT 4
+# define DP_ADJUST_POST_CURSOR2_LANE3_MASK 0xc0
+# define DP_ADJUST_POST_CURSOR2_LANE3_SHIFT 6
#define DP_TEST_REQUEST 0x218
# define DP_TEST_LINK_TRAINING (1 << 0)
@@ -1008,6 +1017,36 @@
#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET 0x69494
#define DP_HDCP_2_2_REG_DBG_OFFSET 0x69518
+/* Link Training (LT)-tunable PHY Repeaters */
+#define DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV 0xf0000 /* 1.3 */
+#define DP_MAX_LINK_RATE_PHY_REPEATER 0xf0001 /* 1.4a */
+#define DP_PHY_REPEATER_CNT 0xf0002 /* 1.3 */
+#define DP_PHY_REPEATER_MODE 0xf0003 /* 1.3 */
+#define DP_MAX_LANE_COUNT_PHY_REPEATER 0xf0004 /* 1.4a */
+#define DP_Repeater_FEC_CAPABILITY 0xf0004 /* 1.4 */
+#define DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT 0xf0005 /* 1.4a */
+#define DP_TRAINING_PATTERN_SET_PHY_REPEATER1 0xf0010 /* 1.3 */
+#define DP_TRAINING_LANE0_SET_PHY_REPEATER1 0xf0011 /* 1.3 */
+#define DP_TRAINING_LANE1_SET_PHY_REPEATER1 0xf0012 /* 1.3 */
+#define DP_TRAINING_LANE2_SET_PHY_REPEATER1 0xf0013 /* 1.3 */
+#define DP_TRAINING_LANE3_SET_PHY_REPEATER1 0xf0014 /* 1.3 */
+#define DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xf0020 /* 1.4a */
+#define DP_TRANSMITTER_CAPABILITY_PHY_REPEATER1 0xf0021 /* 1.4a */
+#define DP_LANE0_1_STATUS_PHY_REPEATER1 0xf0030 /* 1.3 */
+#define DP_LANE2_3_STATUS_PHY_REPEATER1 0xf0031 /* 1.3 */
+#define DP_LANE_ALIGN_STATUS_UPDATED_PHY_REPEATER1 0xf0032 /* 1.3 */
+#define DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 0xf0033 /* 1.3 */
+#define DP_ADJUST_REQUEST_LANE2_3_PHY_REPEATER1 0xf0034 /* 1.3 */
+#define DP_SYMBOL_ERROR_COUNT_LANE0_PHY_REPEATER1 0xf0035 /* 1.3 */
+#define DP_SYMBOL_ERROR_COUNT_LANE1_PHY_REPEATER1 0xf0037 /* 1.3 */
+#define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */
+#define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */
+#define DP_FEC_STATUS_PHY_REPEATER1 0xf0290 /* 1.4 */
+
+/* Repeater modes */
+#define DP_PHY_REPEATER_MODE_TRANSPARENT 0x55 /* 1.3 */
+#define DP_PHY_REPEATER_MODE_NON_TRANSPARENT 0xaa /* 1.3 */
+
/* DP HDCP message start offsets in DPCD address space */
#define DP_HDCP_2_2_AKE_INIT_OFFSET DP_HDCP_2_2_REG_RTX_OFFSET
#define DP_HDCP_2_2_AKE_SEND_CERT_OFFSET DP_HDCP_2_2_REG_CERT_RX_OFFSET
@@ -1091,6 +1130,8 @@ u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
+u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
+ unsigned int lane);
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
@@ -1186,6 +1227,13 @@ drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
}
static inline bool
+drm_dp_fast_training_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x11 &&
+ (dpcd[DP_MAX_DOWNSPREAD] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
+}
+
+static inline bool
drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return dpcd[DP_DPCD_REV] >= 0x12 &&
@@ -1250,6 +1298,19 @@ drm_dp_sink_supports_fec(const u8 fec_capable)
return fec_capable & DP_FEC_CAPABLE;
}
+static inline bool
+drm_dp_channel_coding_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_8B10B;
+}
+
+static inline bool
+drm_dp_alternate_scrambler_reset_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_EDP_CONFIGURATION_CAP] &
+ DP_ALTERNATE_SCRAMBLER_RESET_CAP;
+}
+
/*
* DisplayPort AUX channel
*/
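The new inline helpers added above (fast training, 8b/10b channel coding, alternate scrambler reset) all follow one pattern: index the cached DPCD capability block with a register offset and test a bit. That pattern in isolation, using offsets taken from this header; the snapshot array in main() is fabricated test data:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DP_MAIN_LINK_CHANNEL_CODING	0x006
#define DP_CAP_ANSI_8B10B		(1 << 0)
#define DP_RECEIVER_CAP_SIZE		0xf

static bool channel_coding_supported(const uint8_t dpcd[DP_RECEIVER_CAP_SIZE])
{
	return dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_8B10B;
}

int main(void)
{
	/* fake DPCD snapshot advertising 8b/10b support */
	uint8_t dpcd[DP_RECEIVER_CAP_SIZE] = {
		[DP_MAIN_LINK_CHANNEL_CODING] = DP_CAP_ANSI_8B10B,
	};

	printf("8b/10b: %s\n", channel_coding_supported(dpcd) ? "yes" : "no");
	return 0;
}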
@@ -1394,22 +1455,6 @@ static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
u8 status[DP_LINK_STATUS_SIZE]);
-/*
- * DisplayPort link
- */
-#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0)
-
-struct drm_dp_link {
- unsigned char revision;
- unsigned int rate;
- unsigned int num_lanes;
- unsigned long capabilities;
-};
-
-int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
-int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
-int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
-int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4]);
int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 4a25e0577ae0..d5fc90b30487 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -26,6 +26,26 @@
#include <drm/drm_dp_helper.h>
#include <drm/drm_atomic.h>
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+#include <linux/stackdepot.h>
+#include <linux/timekeeping.h>
+
+enum drm_dp_mst_topology_ref_type {
+ DRM_DP_MST_TOPOLOGY_REF_GET,
+ DRM_DP_MST_TOPOLOGY_REF_PUT,
+};
+
+struct drm_dp_mst_topology_ref_history {
+ struct drm_dp_mst_topology_ref_entry {
+ enum drm_dp_mst_topology_ref_type type;
+ int count;
+ ktime_t ts_nsec;
+ depot_stack_handle_t backtrace;
+ } *entries;
+ int len;
+};
+#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */
+
struct drm_dp_mst_branch;
/**
@@ -45,21 +65,31 @@ struct drm_dp_vcpi {
/**
* struct drm_dp_mst_port - MST port
* @port_num: port number
- * @input: if this port is an input port.
- * @mcs: message capability status - DP 1.2 spec.
- * @ddps: DisplayPort Device Plug Status - DP 1.2
- * @pdt: Peer Device Type
- * @ldps: Legacy Device Plug Status
- * @dpcd_rev: DPCD revision of device on this port
- * @num_sdp_streams: Number of simultaneous streams
- * @num_sdp_stream_sinks: Number of stream sinks
- * @available_pbn: Available bandwidth for this port.
+ * @input: if this port is an input port. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @mcs: message capability status - DP 1.2 spec. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @pdt: Peer Device Type. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @ldps: Legacy Device Plug Status. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @dpcd_rev: DPCD revision of device on this port. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @num_sdp_streams: Number of simultaneous streams. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @num_sdp_stream_sinks: Number of stream sinks. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @available_pbn: Available bandwidth for this port. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
* @next: link to next port on this branch device
- * @mstb: branch device attach below this port
- * @aux: i2c aux transport to talk to device connected to this port.
+ * @aux: i2c aux transport to talk to device connected to this port, protected
+ * by &drm_dp_mst_topology_mgr.base.lock.
* @parent: branch device parent of this port
* @vcpi: Virtual Channel Payload info for this port.
- * @connector: DRM connector this port is connected to.
+ * @connector: DRM connector this port is connected to. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
* @mgr: topology manager this port lives under.
*
* This structure represents an MST port endpoint on a device somewhere
@@ -79,6 +109,14 @@ struct drm_dp_mst_port {
*/
struct kref malloc_kref;
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ /**
+ * @topology_ref_history: A history of each topology
+ * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
+ */
+ struct drm_dp_mst_topology_ref_history topology_ref_history;
+#endif
+
u8 port_num;
bool input;
bool mcs;
@@ -90,7 +128,17 @@ struct drm_dp_mst_port {
u8 num_sdp_stream_sinks;
uint16_t available_pbn;
struct list_head next;
- struct drm_dp_mst_branch *mstb; /* pointer to an mstb if this port has one */
+ /**
+ * @mstb: the branch device connected to this port, if there is one.
+ * This should be considered protected for reading by
+ * &drm_dp_mst_topology_mgr.lock. There are two exceptions to this:
+ * &drm_dp_mst_topology_mgr.up_req_work and
+ * &drm_dp_mst_topology_mgr.work, which do not grab
+ * &drm_dp_mst_topology_mgr.lock during reads but are the only
+ * updaters of this field and are protected from writing to it concurrently
+ * by &drm_dp_mst_topology_mgr.probe_lock.
+ */
+ struct drm_dp_mst_branch *mstb;
struct drm_dp_aux aux; /* i2c bus for this port? */
struct drm_dp_mst_branch *parent;
@@ -116,7 +164,6 @@ struct drm_dp_mst_port {
* @lct: Link count total to talk to this branch device.
* @num_ports: number of ports on the branch.
* @msg_slots: one bit per transmitted msg slot.
- * @ports: linked list of ports on this branch.
* @port_parent: pointer to the port parent, NULL if toplevel.
* @mgr: topology manager for this branch device.
* @tx_slots: transmission slots for this device.
@@ -143,11 +190,35 @@ struct drm_dp_mst_branch {
*/
struct kref malloc_kref;
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ /**
+ * @topology_ref_history: A history of each topology
+ * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
+ */
+ struct drm_dp_mst_topology_ref_history topology_ref_history;
+#endif
+
+ /**
+ * @destroy_next: linked-list entry used by
+ * drm_dp_delayed_destroy_work()
+ */
+ struct list_head destroy_next;
+
u8 rad[8];
u8 lct;
int num_ports;
int msg_slots;
+ /**
+ * @ports: the list of ports on this branch device. This should be
+ * considered protected for reading by &drm_dp_mst_topology_mgr.lock.
+ * There are two exceptions to this:
+ * &drm_dp_mst_topology_mgr.up_req_work and
+ * &drm_dp_mst_topology_mgr.work, which do not grab
+ * &drm_dp_mst_topology_mgr.lock during reads but are the only
+ * updaters of this list and are protected from updating the list
+ * concurrently by &drm_dp_mst_topology_mgr.probe_lock.
+ */
struct list_head ports;
/* list of tx ops queue for this port */
@@ -495,6 +566,13 @@ struct drm_dp_mst_topology_mgr {
struct mutex lock;
/**
+ * @probe_lock: Prevents @work and @up_req_work, the only writers of
+ * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing
+ * while they update the topology.
+ */
+ struct mutex probe_lock;
+
+ /**
* @mst_state: If this manager is enabled for an MST capable port. False
* if no MST sink/branch device is connected.
*/
@@ -571,18 +649,49 @@ struct drm_dp_mst_topology_mgr {
struct work_struct tx_work;
/**
- * @destroy_connector_list: List of to be destroyed connectors.
+ * @destroy_port_list: List of ports to be destroyed.
+ */
+ struct list_head destroy_port_list;
+ /**
+ * @destroy_branch_device_list: List of branch devices to be
+ * destroyed.
+ */
+ struct list_head destroy_branch_device_list;
+ /**
+ * @delayed_destroy_lock: Protects @destroy_port_list and
+ * @destroy_branch_device_list.
+ */
+ struct mutex delayed_destroy_lock;
+ /**
+ * @delayed_destroy_work: Work item to destroy MST port and branch
+ * devices, needed to avoid locking inversion.
+ */
+ struct work_struct delayed_destroy_work;
+
+ /**
+ * @up_req_list: List of pending up requests from the topology that
+ * need to be processed, in chronological order.
*/
- struct list_head destroy_connector_list;
+ struct list_head up_req_list;
/**
- * @destroy_connector_lock: Protects @connector_list.
+ * @up_req_lock: Protects @up_req_list.
*/
- struct mutex destroy_connector_lock;
+ struct mutex up_req_lock;
/**
- * @destroy_connector_work: Work item to destroy connectors. Needed to
- * avoid locking inversion.
+ * @up_req_work: Work item to process up requests received from the
+ * topology. Needed to avoid blocking hotplug handling and sideband
+ * transmissions.
*/
- struct work_struct destroy_connector_work;
+ struct work_struct up_req_work;
+
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ /**
+ * @topology_ref_history_lock: Protects
+ * &drm_dp_mst_port.topology_ref_history and
+ * &drm_dp_mst_branch.topology_ref_history.
+ */
+ struct mutex topology_ref_history_lock;
+#endif
};
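/*
 * A minimal sketch, not part of the patch, of the reader-side rule documented
 * above: anything other than @work and @up_req_work must hold
 * &drm_dp_mst_topology_mgr.lock while walking a branch device's port list.
 */
static int example_count_ports(struct drm_dp_mst_topology_mgr *mgr,
                               struct drm_dp_mst_branch *mstb)
{
        struct drm_dp_mst_port *port;
        int count = 0;

        mutex_lock(&mgr->lock);
        list_for_each_entry(port, &mstb->ports, next)
                count++;
        mutex_unlock(&mgr->lock);

        return count;
}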
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
@@ -599,7 +708,11 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+int
+drm_dp_mst_detect_port(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port);
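/*
 * A sketch, assuming a hypothetical driver wrapper struct: with the new
 * signature, a &drm_connector_helper_funcs.detect_ctx hook can forward its
 * acquire context straight into drm_dp_mst_detect_port().
 */
struct example_connector {                      /* hypothetical */
        struct drm_connector base;
        struct drm_dp_mst_topology_mgr *mgr;
        struct drm_dp_mst_port *port;
};

static int example_detect_ctx(struct drm_connector *connector,
                              struct drm_modeset_acquire_ctx *ctx,
                              bool force)
{
        struct example_connector *ec =
                container_of(connector, struct example_connector, base);

        return drm_dp_mst_detect_port(connector, ctx, ec->mgr, ec->port);
}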
bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port);
@@ -638,7 +751,8 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int __must_check
-drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
+drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
+ bool sync);
ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
unsigned int offset, void *buffer, size_t size);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index b9719418c3d2..f0b03d401c27 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -368,6 +368,10 @@ drm_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame,
const struct drm_connector_state *conn_state);
void
+drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state);
+
+void
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
struct drm_connector *connector,
const struct drm_display_mode *mode,
@@ -481,7 +485,6 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
int drm_add_override_edid_modes(struct drm_connector *connector);
u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
-enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
bool drm_detect_hdmi_monitor(struct edid *edid);
bool drm_detect_monitor_audio(struct edid *edid);
enum hdmi_quantization_range
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 8dcc012ccbc8..2338e9f94a03 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -235,7 +235,6 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
void drm_fb_helper_deferred_io(struct fb_info *info,
struct list_head *pagelist);
-int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper);
ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
size_t count, loff_t *ppos);
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 6aaba14f5972..97a48165642c 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -151,6 +151,21 @@ struct drm_gem_object_funcs {
void (*vunmap)(struct drm_gem_object *obj, void *vaddr);
/**
+ * @mmap:
+ *
+ * Handle mmap() of the GEM object and set up the vma accordingly.
+ *
+ * This callback is optional.
+ *
+ * The callback is used by both drm_gem_mmap_obj() and
+ * drm_gem_prime_mmap(). When @mmap is present, @vm_ops is not
+ * used; the @mmap callback must set vma->vm_ops instead. The @mmap
+ * callback is always called with a 0 offset. The caller will remove
+ * the fake offset as necessary.
+ */
+ int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
+
+ /**
* @vm_ops:
*
* Virtual memory operations used with mmap.
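/*
 * A sketch, not from the patch: wiring the new @mmap callback to the shmem
 * helper, whose signature changes to match in the drm_gem_shmem_helper.h
 * hunk further below.
 */
static const struct drm_gem_object_funcs example_gem_funcs = {
        .free = drm_gem_shmem_free_object,
        .mmap = drm_gem_shmem_mmap,     /* called with the fake offset removed */
};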
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 01f514521687..6748379a0b44 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -88,32 +88,6 @@ struct drm_gem_shmem_object {
#define to_drm_gem_shmem_obj(obj) \
container_of(obj, struct drm_gem_shmem_object, base)
-/**
- * DEFINE_DRM_GEM_SHMEM_FOPS() - Macro to generate file operations for shmem drivers
- * @name: name for the generated structure
- *
- * This macro autogenerates a suitable &struct file_operations for shmem based
- * drivers, which can be assigned to &drm_driver.fops. Note that this structure
- * cannot be shared between drivers, because it contains a reference to the
- * current module using THIS_MODULE.
- *
- * Note that the declaration is already marked as static - if you need a
- * non-static version of this you're probably doing it wrong and will break the
- * THIS_MODULE reference by accident.
- */
-#define DEFINE_DRM_GEM_SHMEM_FOPS(name) \
- static const struct file_operations name = {\
- .owner = THIS_MODULE,\
- .open = drm_open,\
- .release = drm_release,\
- .unlocked_ioctl = drm_ioctl,\
- .compat_ioctl = drm_compat_ioctl,\
- .poll = drm_poll,\
- .read = drm_read,\
- .llseek = noop_llseek,\
- .mmap = drm_gem_shmem_mmap, \
- }
-
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
void drm_gem_shmem_free_object(struct drm_gem_object *obj);
@@ -143,9 +117,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma);
-
-extern const struct vm_operations_struct drm_gem_shmem_vm_ops;
+int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *obj);
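/*
 * A sketch: with DEFINE_DRM_GEM_SHMEM_FOPS gone and drm_gem_shmem_mmap()
 * reached through &drm_gem_object_funcs.mmap, a shmem driver can fall back
 * to the generic macro from drm_gem.h; drm_gem_mmap() dispatches to the
 * object's @mmap callback.
 */
DEFINE_DRM_GEM_FOPS(example_fops);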
diff --git a/include/drm/drm_gem_ttm_helper.h b/include/drm/drm_gem_ttm_helper.h
index 6268f89c5a48..118cef76f84f 100644
--- a/include/drm/drm_gem_ttm_helper.h
+++ b/include/drm/drm_gem_ttm_helper.h
@@ -15,5 +15,7 @@
void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *gem);
+int drm_gem_ttm_mmap(struct drm_gem_object *gem,
+ struct vm_area_struct *vma);
#endif
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index 354a9cd358a3..e040541a105f 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -13,6 +13,9 @@
#include <linux/kernel.h> /* for container_of() */
struct drm_mode_create_dumb;
+struct drm_plane;
+struct drm_plane_state;
+struct drm_simple_display_pipe;
struct drm_vram_mm_funcs;
struct filp;
struct vm_area_struct;
@@ -124,6 +127,28 @@ int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
struct drm_device *dev,
uint32_t handle, uint64_t *offset);
+/*
+ * Helpers for struct drm_plane_helper_funcs
+ */
+int
+drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state);
+void
+drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+
+/*
+ * Helpers for struct drm_simple_display_pipe_funcs
+ */
+
+int drm_gem_vram_simple_display_pipe_prepare_fb(
+ struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *new_state);
+
+void drm_gem_vram_simple_display_pipe_cleanup_fb(
+ struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state);
+
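/*
 * A sketch: hooking the new VRAM helpers into a driver's plane helper
 * vtable.
 */
static const struct drm_plane_helper_funcs example_plane_helper_funcs = {
        .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
        .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
};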
/**
* define DRM_GEM_VRAM_DRIVER - default callback functions for \
&struct drm_driver
@@ -184,29 +209,4 @@ struct drm_vram_mm *drm_vram_helper_alloc_mm(
struct drm_device *dev, uint64_t vram_base, size_t vram_size);
void drm_vram_helper_release_mm(struct drm_device *dev);
-/*
- * Helpers for &struct file_operations
- */
-
-int drm_vram_mm_file_operations_mmap(
- struct file *filp, struct vm_area_struct *vma);
-
-/**
- * define DRM_VRAM_MM_FILE_OPERATIONS - default callback functions for \
- &struct file_operations
- *
- * Drivers that use VRAM MM can use this macro to initialize
- * &struct file_operations with default functions.
- */
-#define DRM_VRAM_MM_FILE_OPERATIONS \
- .llseek = no_llseek, \
- .read = drm_read, \
- .poll = drm_poll, \
- .unlocked_ioctl = drm_ioctl, \
- .compat_ioctl = drm_compat_ioctl, \
- .mmap = drm_vram_mm_file_operations_mmap, \
- .open = drm_open, \
- .release = drm_release \
-
-
#endif
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h
deleted file mode 100644
index ee8d61b64f29..000000000000
--- a/include/drm/drm_os_linux.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/**
- * \file drm_os_linux.h
- * OS abstraction macros.
- */
-
-#include <linux/interrupt.h> /* For task queue support */
-#include <linux/sched/signal.h>
-#include <linux/delay.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-
-/** Current process ID */
-#define DRM_CURRENTPID task_pid_nr(current)
-#define DRM_UDELAY(d) udelay(d)
-/** Read a byte from a MMIO region */
-#define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset))
-/** Read a word from a MMIO region */
-#define DRM_READ16(map, offset) readw(((void __iomem *)(map)->handle) + (offset))
-/** Read a dword from a MMIO region */
-#define DRM_READ32(map, offset) readl(((void __iomem *)(map)->handle) + (offset))
-/** Write a byte into a MMIO region */
-#define DRM_WRITE8(map, offset, val) writeb(val, ((void __iomem *)(map)->handle) + (offset))
-/** Write a word into a MMIO region */
-#define DRM_WRITE16(map, offset, val) writew(val, ((void __iomem *)(map)->handle) + (offset))
-/** Write a dword into a MMIO region */
-#define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + (offset))
-
-/** Read a qword from a MMIO region - be careful using these unless you really understand them */
-#define DRM_READ64(map, offset) readq(((void __iomem *)(map)->handle) + (offset))
-/** Write a qword into a MMIO region */
-#define DRM_WRITE64(map, offset, val) writeq(val, ((void __iomem *)(map)->handle) + (offset))
-
-#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
-do { \
- DECLARE_WAITQUEUE(entry, current); \
- unsigned long end = jiffies + (timeout); \
- add_wait_queue(&(queue), &entry); \
- \
- for (;;) { \
- __set_current_state(TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (time_after_eq(jiffies, end)) { \
- ret = -EBUSY; \
- break; \
- } \
- schedule_timeout((HZ/100 > 1) ? HZ/100 : 1); \
- if (signal_pending(current)) { \
- ret = -EINTR; \
- break; \
- } \
- } \
- __set_current_state(TASK_RUNNING); \
- remove_wait_queue(&(queue), &entry); \
-} while (0)
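/*
 * A sketch of a replacement for the deleted DRM_WAIT_ON(): the core
 * wait_event helpers cover the same ground, though the signal case returns
 * -ERESTARTSYS rather than DRM_WAIT_ON's -EINTR.
 */
static int example_wait(wait_queue_head_t *wq, bool *done, unsigned long timeout)
{
        long ret = wait_event_interruptible_timeout(*wq, READ_ONCE(*done),
                                                    timeout);

        if (ret == 0)
                return -EBUSY;          /* timed out, as DRM_WAIT_ON did */
        return ret < 0 ? ret : 0;
}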
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index cd5903ad33f7..3f396d94afe4 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -140,10 +140,11 @@ struct drm_plane_state {
* @zpos:
* Priority of the given plane on crtc (optional).
*
- * Note that multiple active planes on the same crtc can have an
- * identical zpos value. The rule to solving the conflict is to compare
- * the plane object IDs; the plane with a higher ID must be stacked on
- * top of a plane with a lower ID.
+ * User-space may set mutable zpos properties so that multiple active
+ * planes on the same CRTC have identical zpos values. This is a
+ * user-space bug, but drivers can solve the conflict by comparing the
+ * plane object IDs; the plane with a higher ID is stacked on top of a
+ * plane with a lower ID.
*
* See drm_plane_create_zpos_property() and
* drm_plane_create_zpos_immutable_property() for more details.
@@ -183,8 +184,26 @@ struct drm_plane_state {
*/
struct drm_property_blob *fb_damage_clips;
- /** @src: clipped source coordinates of the plane (in 16.16) */
- /** @dst: clipped destination coordinates of the plane */
+ /**
+ * @src:
+ *
+ * source coordinates of the plane (in 16.16).
+ *
+ * When using drm_atomic_helper_check_plane_state(),
+ * the coordinates are clipped, but the driver may choose
+ * to use unclipped coordinates instead when the hardware
+ * performs the clipping automatically.
+ */
+ /**
+ * @dst:
+ *
+ * destination coordinates of the plane.
+ *
+ * When using drm_atomic_helper_check_plane_state(),
+ * the coordinates are clipped, but the driver may choose
+ * to use unclipped coordinates instead when the hardware
+ * performs the clipping automatically.
+ */
struct drm_rect src, dst;
/**
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 4d89cd0a60db..15afee9cf049 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -49,7 +49,7 @@ struct drm_simple_display_pipe_funcs {
*
* drm_mode_status Enum
*/
- enum drm_mode_status (*mode_valid)(struct drm_crtc *crtc,
+ enum drm_mode_status (*mode_valid)(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode);
/**
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 43c4929a2171..54fa457b26ab 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -147,7 +147,6 @@ struct ttm_tt;
* holds a pointer to a persistent shmem object.
* @ttm: TTM structure holding system pages.
* @evicted: Whether the object was evicted without user-space knowing.
- * @cpu_writes: For synchronization. Number of cpu writers.
* @lru: List head for the lru list.
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
@@ -199,12 +198,6 @@ struct ttm_buffer_object {
bool evicted;
/**
- * Members protected by the bo::reserved lock only when written to.
- */
-
- atomic_t cpu_writers;
-
- /**
* Members protected by the bdev::lru_lock.
*/
@@ -368,30 +361,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
void ttm_bo_put(struct ttm_buffer_object *bo);
/**
- * ttm_bo_add_to_lru
- *
- * @bo: The buffer object.
- *
- * Add this bo to the relevant mem type lru and, if it's backed by
- * system pages (ttms) to the swap list.
- * This function must be called with struct ttm_bo_global::lru_lock held, and
- * is typically called immediately prior to unreserving a bo.
- */
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
-
-/**
- * ttm_bo_del_from_lru
- *
- * @bo: The buffer object.
- *
- * Remove this bo from all lru lists used to lookup and reserve an object.
- * This function must be called with struct ttm_bo_global::lru_lock held,
- * and is usually called just immediately after the bo has been reserved to
- * avoid recursive reservation from lru lists.
- */
-void ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
-
-/**
* ttm_bo_move_to_lru_tail
*
* @bo: The buffer object.
@@ -442,31 +411,6 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place);
/**
- * ttm_bo_synccpu_write_grab
- *
- * @bo: The buffer object:
- * @no_wait: Return immediately if buffer is busy.
- *
- * Synchronizes a buffer object for CPU RW access. This means
- * command submission that affects the buffer will return -EBUSY
- * until ttm_bo_synccpu_write_release is called.
- *
- * Returns
- * -EBUSY if the buffer is busy and no_wait is true.
- * -ERESTARTSYS if interrupted by a signal.
- */
-int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
-
-/**
- * ttm_bo_synccpu_write_release:
- *
- * @bo : The buffer object.
- *
- * Releases a synccpu lock.
- */
-void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
-
-/**
* ttm_bo_acc_size
*
* @bdev: Pointer to a ttm_bo_device struct.
@@ -710,16 +654,14 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
/**
- * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
+ * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object.
*
- * @vma: vma as input from the fbdev mmap method.
+ * @vma: vma as input from the mmap method.
- * @bo: The bo backing the address space. The address space will
- * have the same size as the bo, and start at offset 0.
+ * @bo: The bo backing the address space.
*
- * This function is intended to be called by the fbdev mmap method
- * if the fbdev address space is to be backed by a bo.
+ * Maps a buffer object.
*/
-int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
+int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
/**
* ttm_bo_mmap - mmap out of the ttm device address space.
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 27b9d5be197f..cac7a8a0825a 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -423,7 +423,6 @@ extern struct ttm_bo_global {
*/
struct kobject kobj;
- struct ttm_mem_global *mem_glob;
struct page *dummy_read_page;
spinlock_t lru_lock;
@@ -467,7 +466,6 @@ struct ttm_bo_device {
* Constant after bo device init / atomic.
*/
struct list_head device_list;
- struct ttm_bo_global *glob;
struct ttm_bo_driver *driver;
struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
@@ -631,9 +629,6 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
-void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
-
/**
* __ttm_bo_reserve:
*
@@ -727,15 +722,9 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait,
struct ww_acquire_ctx *ticket)
{
- int ret;
-
WARN_ON(!kref_read(&bo->kref));
- ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
- if (likely(ret == 0))
- ttm_bo_del_sub_from_lru(bo);
-
- return ret;
+ return __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
}
/**
@@ -762,9 +751,7 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
else
dma_resv_lock_slow(bo->base.resv, ticket);
- if (likely(ret == 0))
- ttm_bo_del_sub_from_lru(bo);
- else if (ret == -EINTR)
+ if (ret == -EINTR)
ret = -ERESTARTSYS;
return ret;
@@ -779,12 +766,9 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
*/
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
- spin_lock(&bo->bdev->glob->lru_lock);
- if (list_empty(&bo->lru))
- ttm_bo_add_to_lru(bo);
- else
- ttm_bo_move_to_lru_tail(bo, NULL);
- spin_unlock(&bo->bdev->glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
+ ttm_bo_move_to_lru_tail(bo, NULL);
+ spin_unlock(&ttm_bo_glob.lru_lock);
dma_resv_unlock(bo->base.resv);
}
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 7e46cc678e7e..5a19843bb80d 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -99,7 +99,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
- struct list_head *dups, bool del_lru);
+ struct list_head *dups);
/**
* function ttm_eu_fence_buffer_objects.
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 3ff48a0a2d7b..c78ea99c42cf 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -65,7 +65,6 @@
struct ttm_mem_zone;
extern struct ttm_mem_global {
struct kobject kobj;
- struct ttm_bo_global *bo_glob;
struct workqueue_struct *swap_queue;
struct work_struct work;
spinlock_t lock;
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 90528f12bdfa..29fc933df3bf 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -326,10 +326,11 @@ static inline int bitmap_equal(const unsigned long *src1,
}
/**
- * bitmap_or_equal - Check whether the or of two bitnaps is equal to a third
+ * bitmap_or_equal - Check whether the or of two bitmaps is equal to a third
* @src1: Pointer to bitmap 1
* @src2: Pointer to bitmap 2 will be or'ed with bitmap 1
* @src3: Pointer to bitmap 3. Compare to the result of *@src1 | *@src2
+ * @nbits: number of bits in each of these bitmaps
*
* Returns: True if (*@src1 | *@src2) == *@src3, false otherwise
*/
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index 6b318efd8a74..cdf016596659 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -40,6 +40,7 @@
# define __GCC4_has_attribute___noclone__ 1
# define __GCC4_has_attribute___nonstring__ 0
# define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
+# define __GCC4_has_attribute___fallthrough__ 0
#endif
/*
@@ -186,6 +187,22 @@
#endif
/*
+ * Add the pseudo keyword 'fallthrough' so case statement blocks
+ * must end with any of these keywords:
+ * break;
+ * fallthrough;
+ * goto <label>;
+ * return [expression];
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Statement-Attributes.html#Statement-Attributes
+ */
+#if __has_attribute(__fallthrough__)
+# define fallthrough __attribute__((__fallthrough__))
+#else
+# define fallthrough do {} while (0) /* fallthrough */
+#endif
+
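/*
 * A usage sketch for the new pseudo-keyword; the helpers are hypothetical
 * stubs.
 */
static void example_prepare(void) { }
static void example_commit(void) { }

static void example_apply(int mode)
{
        switch (mode) {
        case 0:
                example_prepare();
                fallthrough;    /* annotated instead of a bare comment */
        case 1:
                example_commit();
                break;
        default:
                break;
        }
}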
+/*
* Note the missing underscores.
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noinline-function-attribute
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
index 8557efe096dc..fa35b52e0002 100644
--- a/include/linux/device_cgroup.h
+++ b/include/linux/device_cgroup.h
@@ -12,26 +12,15 @@
#define DEVCG_DEV_ALL 4 /* this represents all devices */
#ifdef CONFIG_CGROUP_DEVICE
-extern int __devcgroup_check_permission(short type, u32 major, u32 minor,
- short access);
+int devcgroup_check_permission(short type, u32 major, u32 minor,
+ short access);
#else
-static inline int __devcgroup_check_permission(short type, u32 major, u32 minor,
- short access)
+static inline int devcgroup_check_permission(short type, u32 major, u32 minor,
+ short access)
{ return 0; }
#endif
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
-static inline int devcgroup_check_permission(short type, u32 major, u32 minor,
- short access)
-{
- int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access);
-
- if (rc)
- return -EPERM;
-
- return __devcgroup_check_permission(type, major, minor, access);
-}
-
static inline int devcgroup_inode_permission(struct inode *inode, int mask)
{
short type, access = 0;
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index ec212cb27fdc..af73f835c51c 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -43,6 +43,18 @@ struct dma_buf_ops {
bool cache_sgt_mapping;
/**
+ * @dynamic_mapping:
+ *
+ * If true the framework makes sure that the map/unmap_dma_buf
+ * callbacks are always called with the dma_resv object locked.
+ *
+ * If false the framework makes sure that the map/unmap_dma_buf
+ * callbacks are always called without the dma_resv object locked.
+ * Mutually exclusive with @cache_sgt_mapping.
+ */
+ bool dynamic_mapping;
+
+ /**
* @attach:
*
* This is called from dma_buf_attach() to make sure that a given
@@ -109,6 +121,9 @@ struct dma_buf_ops {
* any other kind of sharing that the exporter might wish to make
* available to buffer-users.
*
+ * This is always called with the dmabuf->resv object locked when
+ * the dynamic_mapping flag is true.
+ *
* Returns:
*
* A &sg_table scatter list of or the backing storage of the DMA buffer,
@@ -267,14 +282,16 @@ struct dma_buf_ops {
* struct dma_buf - shared buffer object
* @size: size of the buffer
* @file: file pointer used for sharing buffers across, and for refcounting.
- * @attachments: list of dma_buf_attachment that denotes all devices attached.
+ * @attachments: list of dma_buf_attachment that denotes all devices attached,
+ * protected by dma_resv lock.
* @ops: dma_buf_ops associated with this buffer object.
* @lock: used internally to serialize list manipulation, attach/detach and
- * vmap/unmap, and accesses to name
+ * vmap/unmap
* @vmapping_counter: used internally to refcnt the vmaps
* @vmap_ptr: the current vmap ptr if vmapping_counter > 0
* @exp_name: name of the exporter; useful for debugging.
- * @name: userspace-provided name; useful for accounting and debugging.
+ * @name: userspace-provided name; useful for accounting and debugging,
+ * protected by @resv.
* @owner: pointer to exporter module; used for refcounting when exporter is a
* kernel module.
* @list_node: node for dma_buf accounting and debugging.
@@ -323,10 +340,12 @@ struct dma_buf {
* struct dma_buf_attachment - holds device-buffer attachment data
* @dmabuf: buffer for this attachment.
* @dev: device attached to the buffer.
- * @node: list of dma_buf_attachment.
+ * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
* @sgt: cached mapping.
* @dir: direction of cached mapping.
* @priv: exporter specific attachment data.
+ * @dynamic_mapping: true if dma_buf_map/unmap_attachment() is called with the
+ * dma_resv lock held.
*
* This structure holds the attachment information between the dma_buf buffer
* and its user device(s). The list contains one attachment struct per device
@@ -343,6 +362,7 @@ struct dma_buf_attachment {
struct list_head node;
struct sg_table *sgt;
enum dma_data_direction dir;
+ bool dynamic_mapping;
void *priv;
};
@@ -394,10 +414,40 @@ static inline void get_dma_buf(struct dma_buf *dmabuf)
get_file(dmabuf->file);
}
+/**
+ * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings.
+ * @dmabuf: the DMA-buf to check
+ *
+ * Returns true if a DMA-buf exporter wants to be called with the dma_resv
+ * locked for the map/unmap callbacks, false if it doesn't want to be called
+ * with the lock held.
+ */
+static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
+{
+ return dmabuf->ops->dynamic_mapping;
+}
+
+/**
+ * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
+ * mappings
+ * @attach: the DMA-buf attachment to check
+ *
+ * Returns true if a DMA-buf importer wants to call the map/unmap functions with
+ * the dma_resv lock held.
+ */
+static inline bool
+dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
+{
+ return attach->dynamic_mapping;
+}
+
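/*
 * A sketch, assuming a hypothetical importer: attach with the new dynamic
 * entry point and take dmabuf->resv around the mapping, per the rule
 * documented on @dynamic_mapping above.
 */
static struct sg_table *example_dynamic_map(struct dma_buf *dmabuf,
                                            struct device *dev,
                                            struct dma_buf_attachment **out)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;

        attach = dma_buf_dynamic_attach(dmabuf, dev, true);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        dma_resv_lock(dmabuf->resv, NULL);
        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        dma_resv_unlock(dmabuf->resv);

        *out = attach;
        return sgt;
}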
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
- struct device *dev);
+ struct device *dev);
+struct dma_buf_attachment *
+dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
+ bool dynamic_mapping);
void dma_buf_detach(struct dma_buf *dmabuf,
- struct dma_buf_attachment *dmabuf_attach);
+ struct dma_buf_attachment *attach);
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
@@ -409,6 +459,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
enum dma_data_direction);
+void dma_buf_move_notify(struct dma_buf *dma_buf);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
diff --git a/include/linux/export.h b/include/linux/export.h
index 95f55b7f83a0..621158ecd2e2 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -52,10 +52,10 @@ extern struct module __this_module;
__ADDRESSABLE(sym) \
asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \
" .balign 4 \n" \
- "__ksymtab_" #sym NS_SEPARATOR #ns ": \n" \
+ "__ksymtab_" #ns NS_SEPARATOR #sym ": \n" \
" .long " #sym "- . \n" \
" .long __kstrtab_" #sym "- . \n" \
- " .long __kstrtab_ns_" #sym "- . \n" \
+ " .long __kstrtabns_" #sym "- . \n" \
" .previous \n")
#define __KSYMTAB_ENTRY(sym, sec) \
@@ -76,10 +76,10 @@ struct kernel_symbol {
#else
#define __KSYMTAB_ENTRY_NS(sym, sec, ns) \
static const struct kernel_symbol __ksymtab_##sym##__##ns \
- asm("__ksymtab_" #sym NS_SEPARATOR #ns) \
+ asm("__ksymtab_" #ns NS_SEPARATOR #sym) \
__attribute__((section("___ksymtab" sec "+" #sym), used)) \
__aligned(sizeof(void *)) \
- = { (unsigned long)&sym, __kstrtab_##sym, __kstrtab_ns_##sym }
+ = { (unsigned long)&sym, __kstrtab_##sym, __kstrtabns_##sym }
#define __KSYMTAB_ENTRY(sym, sec) \
static const struct kernel_symbol __ksymtab_##sym \
@@ -112,7 +112,7 @@ struct kernel_symbol {
/* For every exported symbol, place a struct in the __ksymtab section */
#define ___EXPORT_SYMBOL_NS(sym, sec, ns) \
___export_symbol_common(sym, sec); \
- static const char __kstrtab_ns_##sym[] \
+ static const char __kstrtabns_##sym[] \
__attribute__((section("__ksymtab_strings"), used, aligned(1))) \
= #ns; \
__KSYMTAB_ENTRY_NS(sym, sec, ns)
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index f8245d67f070..5dd9c982e2cb 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -202,6 +202,14 @@ struct gpio_irq_chip {
bool threaded;
/**
+ * @init_hw: optional routine to initialize hardware before
+ * an IRQ chip is added. This is useful when a particular
+ * driver wants to clear IRQ-related registers in order to
+ * avoid undesired events.
+ */
+ int (*init_hw)(struct gpio_chip *chip);
+
+ /**
* @init_valid_mask: optional routine to initialize @valid_mask, to be
* used if not all GPIO lines are valid interrupts. Sometimes some
* lines just cannot fire interrupts, and this routine, when defined,
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index e6eea45e1154..6f8d772591ba 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -18,6 +18,7 @@ enum host1x_class {
};
struct host1x_client;
+struct iommu_group;
/**
* struct host1x_client_ops - host1x client operations
@@ -34,6 +35,7 @@ struct host1x_client_ops {
* @list: list node for the host1x client
* @parent: pointer to struct device representing the host1x controller
* @dev: pointer to struct device backing this host1x client
+ * @group: IOMMU group that this client is a member of
* @ops: host1x client operations
* @class: host1x class represented by this client
* @channel: host1x channel associated with this client
@@ -44,6 +46,7 @@ struct host1x_client {
struct list_head list;
struct device *parent;
struct device *dev;
+ struct iommu_group *group;
const struct host1x_client_ops *ops;
@@ -64,8 +67,9 @@ struct sg_table;
struct host1x_bo_ops {
struct host1x_bo *(*get)(struct host1x_bo *bo);
void (*put)(struct host1x_bo *bo);
- dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
- void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
+ struct sg_table *(*pin)(struct device *dev, struct host1x_bo *bo,
+ dma_addr_t *phys);
+ void (*unpin)(struct device *dev, struct sg_table *sgt);
void *(*mmap)(struct host1x_bo *bo);
void (*munmap)(struct host1x_bo *bo, void *addr);
void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
@@ -92,15 +96,17 @@ static inline void host1x_bo_put(struct host1x_bo *bo)
bo->ops->put(bo);
}
-static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
- struct sg_table **sgt)
+static inline struct sg_table *host1x_bo_pin(struct device *dev,
+ struct host1x_bo *bo,
+ dma_addr_t *phys)
{
- return bo->ops->pin(bo, sgt);
+ return bo->ops->pin(dev, bo, phys);
}
-static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+static inline void host1x_bo_unpin(struct device *dev, struct host1x_bo *bo,
+ struct sg_table *sgt)
{
- bo->ops->unpin(bo, sgt);
+ bo->ops->unpin(dev, sgt);
}
static inline void *host1x_bo_mmap(struct host1x_bo *bo)
@@ -158,7 +164,7 @@ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
struct host1x_channel;
struct host1x_job;
-struct host1x_channel *host1x_channel_request(struct device *dev);
+struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);
@@ -167,6 +173,9 @@ int host1x_job_submit(struct host1x_job *job);
* host1x job
*/
+#define HOST1X_RELOC_READ (1 << 0)
+#define HOST1X_RELOC_WRITE (1 << 1)
+
struct host1x_reloc {
struct {
struct host1x_bo *bo;
@@ -177,6 +186,7 @@ struct host1x_reloc {
unsigned long offset;
} target;
unsigned long shift;
+ unsigned long flags;
};
struct host1x_job {
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 04c36b7a61dd..72579168189d 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -235,7 +235,7 @@ enum hwmon_power_attributes {
#define HWMON_P_LABEL BIT(hwmon_power_label)
#define HWMON_P_ALARM BIT(hwmon_power_alarm)
#define HWMON_P_CAP_ALARM BIT(hwmon_power_cap_alarm)
-#define HWMON_P_MIN_ALARM BIT(hwmon_power_max_alarm)
+#define HWMON_P_MIN_ALARM BIT(hwmon_power_min_alarm)
#define HWMON_P_MAX_ALARM BIT(hwmon_power_max_alarm)
#define HWMON_P_LCRIT_ALARM BIT(hwmon_power_lcrit_alarm)
#define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm)
diff --git a/include/linux/leds.h b/include/linux/leds.h
index b8df71193329..efb309dba914 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -247,7 +247,7 @@ extern void led_set_brightness(struct led_classdev *led_cdev,
/**
* led_set_brightness_sync - set LED brightness synchronously
* @led_cdev: the LED to set
- * @brightness: the brightness to set it to
+ * @value: the brightness to set it to
*
* Set an LED's brightness immediately. This function will block
* the caller for the time required for accessing device registers,
@@ -301,8 +301,7 @@ extern void led_sysfs_enable(struct led_classdev *led_cdev);
/**
* led_compose_name - compose LED class device name
* @dev: LED controller device object
- * @child: child fwnode_handle describing a LED or a group of synchronized LEDs;
- * it must be provided only for fwnode based LEDs
+ * @init_data: the LED class device initialization data
* @led_classdev_name: composed LED class device name
*
* Create LED class device name based on the provided init_data argument.
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 9b60863429cc..ae703ea3ef48 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -356,6 +356,19 @@ static inline bool mem_cgroup_disabled(void)
return !cgroup_subsys_enabled(memory_cgrp_subsys);
}
+static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
+ bool in_low_reclaim)
+{
+ if (mem_cgroup_disabled())
+ return 0;
+
+ if (in_low_reclaim)
+ return READ_ONCE(memcg->memory.emin);
+
+ return max(READ_ONCE(memcg->memory.emin),
+ READ_ONCE(memcg->memory.elow));
+}
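/*
 * A sketch with purely illustrative arithmetic: reclaim can use the new
 * helper to leave the protected portion of a cgroup alone.
 */
static unsigned long example_scan_target(struct mem_cgroup *memcg,
                                         unsigned long size,
                                         unsigned long scan,
                                         bool in_low_reclaim)
{
        unsigned long protected = mem_cgroup_protection(memcg, in_low_reclaim);

        if (!size || protected >= size)
                return 0;
        return scan * (size - protected) / size; /* scale by unprotected share */
}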
+
enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
struct mem_cgroup *memcg);
@@ -537,6 +550,8 @@ void mem_cgroup_handle_over_high(void);
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
+unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
+
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
struct task_struct *p);
@@ -829,6 +844,12 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
{
}
+static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
+ bool in_low_reclaim)
+{
+ return 0;
+}
+
static inline enum mem_cgroup_protection mem_cgroup_protected(
struct mem_cgroup *root, struct mem_cgroup *memcg)
{
@@ -968,6 +989,11 @@ static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
return 0;
}
+static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
+{
+ return 0;
+}
+
static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
@@ -1264,6 +1290,9 @@ void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
static inline void mem_cgroup_track_foreign_dirty(struct page *page,
struct bdi_writeback *wb)
{
+ if (mem_cgroup_disabled())
+ return;
+
if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
mem_cgroup_track_foreign_dirty_slowpath(page, wb);
}
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index ad24554f11f9..75f880c25bb8 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -31,7 +31,7 @@
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
-#define PHY_ID_KSZ8795 0x00221550
+#define PHY_ID_KSZ87XX 0x00221550
#define PHY_ID_KSZ9477 0x00221631
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 682fd465df06..cfce186f0c4e 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -18,7 +18,7 @@ struct page_ext_operations {
enum page_ext_flags {
PAGE_EXT_OWNER,
- PAGE_EXT_OWNER_ACTIVE,
+ PAGE_EXT_OWNER_ALLOCATED,
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
PAGE_EXT_YOUNG,
PAGE_EXT_IDLE,
@@ -36,6 +36,7 @@ struct page_ext {
unsigned long flags;
};
+extern unsigned long page_ext_size;
extern void pgdat_page_ext_init(struct pglist_data *pgdat);
#ifdef CONFIG_SPARSEMEM
@@ -52,6 +53,13 @@ static inline void page_ext_init(void)
struct page_ext *lookup_page_ext(const struct page *page);
+static inline struct page_ext *page_ext_next(struct page_ext *curr)
+{
+ void *next = curr;
+ next += page_ext_size;
+ return next;
+}
+
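/*
 * A sketch: stepping across the page_ext entries of a contiguous range with
 * the new helper instead of re-running lookup_page_ext() per page.
 */
static void example_walk_page_ext(struct page *page, unsigned int nr)
{
        struct page_ext *ext = lookup_page_ext(page);
        unsigned int i;

        if (!ext)
                return;
        for (i = 0; i < nr; i++) {
                /* ... inspect ext->flags here ... */
                ext = page_ext_next(ext);
        }
}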
#else /* !CONFIG_PAGE_EXTENSION */
struct page_ext;
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 1b5cec067533..f2688404d1cd 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -64,6 +64,8 @@ extern struct resource *platform_get_resource_byname(struct platform_device *,
unsigned int,
const char *);
extern int platform_get_irq_byname(struct platform_device *, const char *);
+extern int platform_get_irq_byname_optional(struct platform_device *dev,
+ const char *name);
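/*
 * A sketch, with a hypothetical "wakeup" resource name: probe a genuinely
 * optional IRQ without the error message platform_get_irq_byname() prints
 * when the resource is absent.
 */
static int example_probe_wake_irq(struct platform_device *pdev)
{
        int irq = platform_get_irq_byname_optional(pdev, "wakeup");

        if (irq < 0)
                return 0;       /* absent: carry on without wake support */
        return irq;
}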
extern int platform_add_devices(struct platform_device **, int);
struct platform_device_info {
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2c2e56bd8913..67a1d86981a9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -223,6 +223,7 @@ extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
+asmlinkage void preempt_schedule_irq(void);
extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4351577b14d7..7914fdaf4226 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3510,8 +3510,9 @@ int skb_ensure_writable(struct sk_buff *skb, int write_len);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
-int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto);
-int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto);
+int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
+ int mac_len);
+int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len);
int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
int skb_mpls_dec_ttl(struct sk_buff *skb);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
diff --git a/include/linux/slab.h b/include/linux/slab.h
index ab2b98ad76e1..4d2a2fa55ed5 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -493,6 +493,10 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
* kmalloc is the normal method of allocating memory
* for objects smaller than page size in the kernel.
*
+ * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
+ * bytes. For a @size that is a power of two, the alignment is also
+ * guaranteed to be at least @size.
+ *
* The @flags argument may be one of the GFP flags defined at
* include/linux/gfp.h and described at
* :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
diff --git a/include/linux/string.h b/include/linux/string.h
index b2f9df7f0761..b6ccdc2c7f02 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -227,7 +227,26 @@ static inline bool strstarts(const char *str, const char *prefix)
}
size_t memweight(const void *ptr, size_t bytes);
-void memzero_explicit(void *s, size_t count);
+
+/**
+ * memzero_explicit - Fill a region of memory (e.g. sensitive
+ * keying data) with 0s.
+ * @s: Pointer to the start of the area.
+ * @count: The size of the area.
+ *
+ * Note: usually using memset() is just fine (!), but in cases
+ * where clearing out _local_ data at the end of a scope is
+ * necessary, memzero_explicit() should be used instead in
+ * order to prevent the compiler from optimising away zeroing.
+ *
+ * memzero_explicit() doesn't need an arch-specific version as
+ * it just invokes memset() followed by a compiler barrier.
+ */
+static inline void memzero_explicit(void *s, size_t count)
+{
+ memset(s, 0, count);
+ barrier_data(s);
+}
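/*
 * A usage sketch: scrub a local buffer that held key material before it
 * goes out of scope; a plain memset() here could be optimized away.
 */
static void example_use_key(const u8 *key, size_t len)
{
        u8 tmp[64];

        memcpy(tmp, key, min_t(size_t, len, sizeof(tmp)));
        /* ... derive something from tmp ... */
        memzero_explicit(tmp, sizeof(tmp));
}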
/**
* kbasename - return the last part of a pathname.
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index 7638dbe7bc50..a940de03808d 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -61,6 +61,7 @@ struct sock_xprt {
struct mutex recv_mutex;
struct sockaddr_storage srcaddr;
unsigned short srcport;
+ int xprt_err;
/*
* UDP socket buffer size parameters
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 99617e528ea2..668e25a76d69 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -393,7 +393,7 @@ struct tcp_sock {
/* fastopen_rsk points to request_sock that resulted in this big
* socket. Used to retransmit SYNACKs etc.
*/
- struct request_sock *fastopen_rsk;
+ struct request_sock __rcu *fastopen_rsk;
u32 *saved_syn;
};
@@ -447,8 +447,8 @@ static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
static inline bool tcp_passive_fastopen(const struct sock *sk)
{
- return (sk->sk_state == TCP_SYN_RECV &&
- tcp_sk(sk)->fastopen_rsk != NULL);
+ return sk->sk_state == TCP_SYN_RECV &&
+ rcu_access_pointer(tcp_sk(sk)->fastopen_rsk) != NULL;
}
static inline void fastopen_queue_tune(struct sock *sk, int backlog)
diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
index 63238c84dc0b..131ea1bad458 100644
--- a/include/linux/tpm_eventlog.h
+++ b/include/linux/tpm_eventlog.h
@@ -152,7 +152,7 @@ struct tcg_algorithm_info {
* total. Once we've done this we know the offset of the data length field,
* and can calculate the total size of the event.
*
- * Return: size of the event on success, <0 on failure
+ * Return: size of the event on success, 0 on failure
*/
static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
@@ -170,6 +170,7 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
u16 halg;
int i;
int j;
+ u32 count, event_type;
marker = event;
marker_start = marker;
@@ -190,16 +191,22 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
}
event = (struct tcg_pcr_event2_head *)mapping;
+ /*
+ * The loop below will unmap these fields if the log is larger than
+ * one page, so save them here for reference:
+ */
+ count = READ_ONCE(event->count);
+ event_type = READ_ONCE(event->event_type);
efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
/* Check if event is malformed. */
- if (event->count > efispecid->num_algs) {
+ if (count > efispecid->num_algs) {
size = 0;
goto out;
}
- for (i = 0; i < event->count; i++) {
+ for (i = 0; i < count; i++) {
halg_size = sizeof(event->digests[i].alg_id);
/* Map the digest's algorithm identifier */
@@ -256,8 +263,9 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
+ event_field->event_size;
size = marker - marker_start;
- if ((event->event_type == 0) && (event_field->event_size == 0))
+ if (event_type == 0 && event_field->event_size == 0)
size = 0;
+
out:
if (do_mapping)
TPM_MEMUNMAP(mapping, mapping_size);
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index e47d0522a1f4..d4ee6e942562 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -355,8 +355,10 @@ extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
-#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
-#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
+#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
+#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
+#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
+#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
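/*
 * A sketch of the intended call pattern for the new unsafe_copy_to_user():
 * bracket it with user_access_begin()/user_access_end() and route faults to
 * a label.
 */
static int example_copy_out(void __user *udst, const void *src, size_t len)
{
        if (!user_access_begin(udst, len))
                return -EFAULT;
        unsafe_copy_to_user(udst, src, len, efault);
        user_access_end();
        return 0;

efault:
        user_access_end();
        return -EFAULT;
}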
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 5921599b6dc4..86eecbd98e84 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -230,8 +230,8 @@ static inline int xa_err(void *entry)
* This structure is used either directly or via the XA_LIMIT() macro
* to communicate the range of IDs that are valid for allocation.
* Two common ranges are predefined for you:
- * * xa_limit_32b - [0 - UINT_MAX]
- * * xa_limit_31b - [0 - INT_MAX]
+ * * xa_limit_32b - [0 - UINT_MAX]
+ * * xa_limit_31b - [0 - INT_MAX]
*/
struct xa_limit {
u32 max;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index ff45c3e1abff..4ab2c49423dc 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -5550,6 +5550,14 @@ const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
/**
+ * regulatory_pre_cac_allowed - check if pre-CAC allowed in the current regdom
+ * @wiphy: wiphy for which pre-CAC capability is checked.
+ *
+ * Pre-CAC is allowed only in some regdomains (notably ETSI).
+ */
+bool regulatory_pre_cac_allowed(struct wiphy *wiphy);
+
+/**
* DOC: Internal regulatory db functions
*
*/
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index df528a623548..ea985aa7a6c5 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -104,7 +104,7 @@ void llc_sk_reset(struct sock *sk);
/* Access to a connection */
int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
-int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
+void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index f8712bbeb2e0..4c2cd9378699 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -52,6 +52,9 @@ struct bpf_prog;
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
struct net {
+ /* First cache line can often be dirtied.
+ * Do not place read-mostly fields here.
+ */
refcount_t passive; /* To decide when the network
* namespace should be freed.
*/
@@ -60,7 +63,13 @@ struct net {
*/
spinlock_t rules_mod_lock;
- u32 hash_mix;
+ unsigned int dev_unreg_count;
+
+ unsigned int dev_base_seq; /* protected by rtnl_mutex */
+ int ifindex;
+
+ spinlock_t nsid_lock;
+ atomic_t fnhe_genid;
struct list_head list; /* list of network namespaces */
struct list_head exit_list; /* To linked to call pernet exit
@@ -76,11 +85,11 @@ struct net {
#endif
struct user_namespace *user_ns; /* Owning user namespace */
struct ucounts *ucounts;
- spinlock_t nsid_lock;
struct idr netns_ids;
struct ns_common ns;
+ struct list_head dev_base_head;
struct proc_dir_entry *proc_net;
struct proc_dir_entry *proc_net_stat;
@@ -93,17 +102,18 @@ struct net {
struct uevent_sock *uevent_sock; /* uevent socket */
- struct list_head dev_base_head;
struct hlist_head *dev_name_head;
struct hlist_head *dev_index_head;
- unsigned int dev_base_seq; /* protected by rtnl_mutex */
- int ifindex;
- unsigned int dev_unreg_count;
+ /* Note that @hash_mix can be read millions of times per second,
+ * it is critical that it is on a read_mostly cache line.
+ */
+ u32 hash_mix;
+
+ struct net_device *loopback_dev; /* The loopback */
/* core fib_rules */
struct list_head rules_ops;
- struct net_device *loopback_dev; /* The loopback */
struct netns_core core;
struct netns_mib mib;
struct netns_packet packet;
@@ -171,7 +181,6 @@ struct net {
struct sock *crypto_nlsk;
#endif
struct sock *diag_nlsk;
- atomic_t fnhe_genid;
} __randomize_layout;
#include <linux/seq_file_net.h>
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index fd178d58fa84..cf8b33213bbc 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -185,7 +185,7 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
{
- return queue->rskq_accept_head == NULL;
+ return READ_ONCE(queue->rskq_accept_head) == NULL;
}
static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
@@ -197,7 +197,7 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
req = queue->rskq_accept_head;
if (req) {
sk_acceptq_removed(parent);
- queue->rskq_accept_head = req->dl_next;
+ WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
if (queue->rskq_accept_head == NULL)
queue->rskq_accept_tail = NULL;
}
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 5d60f13d2347..3ab5c6bbb90b 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -610,4 +610,9 @@ static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize)
return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize);
}
+static inline bool sctp_newsk_ready(const struct sock *sk)
+{
+ return sock_flag(sk, SOCK_DEAD) || sk->sk_socket;
+}
+
#endif /* __net_sctp_h__ */
diff --git a/include/net/sock.h b/include/net/sock.h
index 2c53f1a1d905..f69b58bff7e5 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -878,12 +878,17 @@ static inline bool sk_acceptq_is_full(const struct sock *sk)
*/
static inline int sk_stream_min_wspace(const struct sock *sk)
{
- return sk->sk_wmem_queued >> 1;
+ return READ_ONCE(sk->sk_wmem_queued) >> 1;
}
static inline int sk_stream_wspace(const struct sock *sk)
{
- return sk->sk_sndbuf - sk->sk_wmem_queued;
+ return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued);
+}
+
+static inline void sk_wmem_queued_add(struct sock *sk, int val)
+{
+ WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
}
void sk_stream_write_space(struct sock *sk);
@@ -1207,7 +1212,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
{
- if (sk->sk_wmem_queued >= sk->sk_sndbuf)
+ if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
return false;
return sk->sk_prot->stream_memory_free ?
@@ -1467,7 +1472,7 @@ DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
- sk->sk_wmem_queued -= skb->truesize;
+ sk_wmem_queued_add(sk, -skb->truesize);
sk_mem_uncharge(sk, skb->truesize);
if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
!sk->sk_tx_skb_cache && !skb_cloned(skb)) {
@@ -2014,7 +2019,7 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *fro
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
- sk->sk_wmem_queued += copy;
+ sk_wmem_queued_add(sk, copy);
sk_mem_charge(sk, copy);
return 0;
}
@@ -2220,10 +2225,14 @@ static inline void sk_wake_async(const struct sock *sk, int how, int band)
static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
- if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
- sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
- sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF);
- }
+ u32 val;
+
+ if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
+ return;
+
+ val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+
+ WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
}
struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
@@ -2251,7 +2260,7 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
*/
static inline bool sock_writeable(const struct sock *sk)
{
- return refcount_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
+ return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
}
static inline gfp_t gfp_any(void)
@@ -2271,7 +2280,9 @@ static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
- return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
+ int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len);
+
+ return v ?: 1;
}
/* Alas, with timeout socket operations are not restartable.
diff --git a/include/net/tcp.h b/include/net/tcp.h
index c9a3f9688223..ab4eb5eb5d07 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -258,7 +258,7 @@ static inline bool tcp_under_memory_pressure(const struct sock *sk)
mem_cgroup_under_socket_pressure(sk->sk_memcg))
return true;
- return tcp_memory_pressure;
+ return READ_ONCE(tcp_memory_pressure);
}
/*
* The next routines deal with comparing 32 bit unsigned ints
@@ -1380,13 +1380,14 @@ static inline int tcp_win_from_space(const struct sock *sk, int space)
/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
- return tcp_win_from_space(sk, sk->sk_rcvbuf - sk->sk_backlog.len -
+ return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
+ READ_ONCE(sk->sk_backlog.len) -
atomic_read(&sk->sk_rmem_alloc));
}
static inline int tcp_full_space(const struct sock *sk)
{
- return tcp_win_from_space(sk, sk->sk_rcvbuf);
+ return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}
extern void tcp_openreq_init_rwin(struct request_sock *req,
@@ -1916,7 +1917,8 @@ static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
{
const struct tcp_sock *tp = tcp_sk(sk);
- u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
+ u32 notsent_bytes = READ_ONCE(tp->write_seq) -
+ READ_ONCE(tp->snd_nxt);
return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
}
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 3810b340551c..6bd5ed695a5e 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -32,6 +32,7 @@ extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
struct scsi_eh_save {
/* saved state */
int result;
+ unsigned int resid_len;
int eh_eflags;
enum dma_data_direction data_direction;
unsigned underflow;
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
index 0fd39295b426..057d2a2d0bd0 100644
--- a/include/sound/hda_register.h
+++ b/include/sound/hda_register.h
@@ -264,6 +264,9 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_REG_ML_LOUTPAY 0x20
#define AZX_REG_ML_LINPAY 0x30
+/* bit0 is reserved, with BIT(1) mapping to stream1 */
+#define ML_LOSIDV_STREAM_MASK 0xFFFE
+
#define ML_LCTL_SCF_MASK 0xF
#define AZX_MLCTL_SPA (0x1 << 16)
#define AZX_MLCTL_CPA (0x1 << 23)
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index edc5c887a44c..191fe447f990 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -519,10 +519,10 @@ TRACE_EVENT(rxrpc_local,
);
TRACE_EVENT(rxrpc_peer,
- TP_PROTO(struct rxrpc_peer *peer, enum rxrpc_peer_trace op,
+ TP_PROTO(unsigned int peer_debug_id, enum rxrpc_peer_trace op,
int usage, const void *where),
- TP_ARGS(peer, op, usage, where),
+ TP_ARGS(peer_debug_id, op, usage, where),
TP_STRUCT__entry(
__field(unsigned int, peer )
@@ -532,7 +532,7 @@ TRACE_EVENT(rxrpc_peer,
),
TP_fast_assign(
- __entry->peer = peer->debug_id;
+ __entry->peer = peer_debug_id;
__entry->op = op;
__entry->usage = usage;
__entry->where = where;
@@ -546,10 +546,10 @@ TRACE_EVENT(rxrpc_peer,
);
TRACE_EVENT(rxrpc_conn,
- TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op,
+ TP_PROTO(unsigned int conn_debug_id, enum rxrpc_conn_trace op,
int usage, const void *where),
- TP_ARGS(conn, op, usage, where),
+ TP_ARGS(conn_debug_id, op, usage, where),
TP_STRUCT__entry(
__field(unsigned int, conn )
@@ -559,7 +559,7 @@ TRACE_EVENT(rxrpc_conn,
),
TP_fast_assign(
- __entry->conn = conn->debug_id;
+ __entry->conn = conn_debug_id;
__entry->op = op;
__entry->usage = usage;
__entry->where = where;
@@ -606,10 +606,10 @@ TRACE_EVENT(rxrpc_client,
);
TRACE_EVENT(rxrpc_call,
- TP_PROTO(struct rxrpc_call *call, enum rxrpc_call_trace op,
+ TP_PROTO(unsigned int call_debug_id, enum rxrpc_call_trace op,
int usage, const void *where, const void *aux),
- TP_ARGS(call, op, usage, where, aux),
+ TP_ARGS(call_debug_id, op, usage, where, aux),
TP_STRUCT__entry(
__field(unsigned int, call )
@@ -620,7 +620,7 @@ TRACE_EVENT(rxrpc_call,
),
TP_fast_assign(
- __entry->call = call->debug_id;
+ __entry->call = call_debug_id;
__entry->op = op;
__entry->usage = usage;
__entry->where = where;
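The rxrpc changes above all make the same move: the tracepoints take a plain debug_id instead of the object pointer, so the trace call no longer dereferences a peer/conn/call whose last reference may already have been dropped by the time the event fires. A hedged sketch of the calling convention (names are illustrative, not the rxrpc API):

struct conn_sketch {
	unsigned int debug_id;
	int usage;
};

/* Stand-in for a tracepoint: note it receives only scalars. */
static void trace_conn_put(unsigned int conn_debug_id, int usage)
{
	(void)conn_debug_id;
	(void)usage;
}

static void conn_put(struct conn_sketch *conn)
{
	/* Capture everything the trace needs while the object is live... */
	unsigned int debug_id = conn->debug_id;
	int usage = --conn->usage;

	/* ...so nothing touches *conn after the reference is dropped. */
	trace_conn_put(debug_id, usage);
}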
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index a0c4b8a30966..51fe9f6719eb 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -82,7 +82,7 @@ TRACE_EVENT(sock_rcvqueue_full,
TP_fast_assign(
__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
__entry->truesize = skb->truesize;
- __entry->sk_rcvbuf = sk->sk_rcvbuf;
+ __entry->sk_rcvbuf = READ_ONCE(sk->sk_rcvbuf);
),
TP_printk("rmem_alloc=%d truesize=%u sk_rcvbuf=%d",
@@ -115,7 +115,7 @@ TRACE_EVENT(sock_exceed_buf_limit,
__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
__entry->sysctl_wmem = sk_get_wmem0(sk, prot);
__entry->wmem_alloc = refcount_read(&sk->sk_wmem_alloc);
- __entry->wmem_queued = sk->sk_wmem_queued;
+ __entry->wmem_queued = READ_ONCE(sk->sk_wmem_queued);
__entry->kind = kind;
),
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 4fe35d600ab8..bbdad866e3fe 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -500,6 +500,8 @@ struct drm_amdgpu_gem_op {
#define AMDGPU_VM_MTYPE_CC (3 << 5)
/* Use UC MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_UC (4 << 5)
+/* Use RW MTYPE instead of default MTYPE */
+#define AMDGPU_VM_MTYPE_RW (5 << 5)
struct drm_amdgpu_gem_va {
/** GEM object handle */
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 8a5b2f8f8eb9..868bf7996c0f 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -778,11 +778,12 @@ struct drm_syncobj_array {
__u32 pad;
};
+#define DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED (1 << 0) /* last available point on timeline syncobj */
struct drm_syncobj_timeline_array {
__u64 handles;
__u64 points;
__u32 count_handles;
- __u32 pad;
+ __u32 flags;
};
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 2376d36ea573..8caaaf7ff91b 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -69,7 +69,7 @@ extern "C" {
#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
((__u32)(c) << 16) | ((__u32)(d) << 24))
-#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
+#define DRM_FORMAT_BIG_ENDIAN (1U<<31) /* format is big endian instead of little endian */
/* Reserve 0 for the invalid format specifier */
#define DRM_FORMAT_INVALID 0
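The DRM_FORMAT_BIG_ENDIAN change is subtle but real: with a 32-bit int, 1 << 31 shifts a set bit into the sign position, which is undefined behaviour in C, and even where it happens to work the result is a negative int that sign-extends when widened. The unsigned literal avoids both. A minimal standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned int ok = 1U << 31;	/* well-defined: 0x80000000 */
	long long widened = ok;		/* stays 0x80000000 */

	/* int bad = 1 << 31;		   undefined behaviour: shifting
	 *				   into the sign bit of an int */
	printf("%x %llx\n", ok, widened);
	return 0;
}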
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index 3e59b8382dd8..45c6582b3df3 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -68,7 +68,7 @@ struct drm_exynos_gem_info {
/**
* A structure for user connection request of virtual display.
*
- * @connection: indicate whether doing connetion or not by user.
+ * @connection: indicate whether the user requests a connection or not.
* @extensions: if this value is 1 then the vidi driver would need additional
* 128bytes edid data.
* @edid: the edid data pointer from user side.
diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h
index 1fccffef9e27..5a142fad473c 100644
--- a/include/uapi/drm/omap_drm.h
+++ b/include/uapi/drm/omap_drm.h
@@ -38,20 +38,20 @@ struct drm_omap_param {
__u64 value; /* in (set_param), out (get_param) */
};
-#define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */
-#define OMAP_BO_CACHE_MASK 0x00000006 /* cache type mask, see cache modes */
-#define OMAP_BO_TILED_MASK 0x00000f00 /* tiled mapping mask, see tiled modes */
+/* Scanout buffer, consumable by DSS */
+#define OMAP_BO_SCANOUT 0x00000001
-/* cache modes */
-#define OMAP_BO_CACHED 0x00000000 /* default */
-#define OMAP_BO_WC 0x00000002 /* write-combine */
-#define OMAP_BO_UNCACHED 0x00000004 /* strongly-ordered (uncached) */
+/* Buffer CPU caching mode: cached, write-combining or uncached. */
+#define OMAP_BO_CACHED 0x00000000
+#define OMAP_BO_WC 0x00000002
+#define OMAP_BO_UNCACHED 0x00000004
+#define OMAP_BO_CACHE_MASK 0x00000006
-/* tiled modes */
+/* Use TILER for the buffer. The TILER container unit can be 8, 16 or 32 bits. */
#define OMAP_BO_TILED_8 0x00000100
#define OMAP_BO_TILED_16 0x00000200
#define OMAP_BO_TILED_32 0x00000300
-#define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32)
+#define OMAP_BO_TILED_MASK 0x00000f00
union omap_gem_size {
__u32 bytes; /* (for non-tiled formats) */
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 0f4f87a6fd54..e7fe550b6038 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -291,6 +291,6 @@
#define PORT_SUNIX 121
/* Freescale Linflex UART */
-#define PORT_LINFLEXUART 121
+#define PORT_LINFLEXUART 122
#endif /* _UAPILINUX_SERIAL_CORE_H */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3f0cb82e4fbc..9ec0b0bfddbd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3779,11 +3779,23 @@ static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event)
perf_event_groups_insert(&ctx->flexible_groups, event);
}
+/* pick an event from the flexible_groups to rotate */
static inline struct perf_event *
-ctx_first_active(struct perf_event_context *ctx)
+ctx_event_to_rotate(struct perf_event_context *ctx)
{
- return list_first_entry_or_null(&ctx->flexible_active,
- struct perf_event, active_list);
+ struct perf_event *event;
+
+ /* pick the first active flexible event */
+ event = list_first_entry_or_null(&ctx->flexible_active,
+ struct perf_event, active_list);
+
+ /* if no active flexible event, pick the first event */
+ if (!event) {
+ event = rb_entry_safe(rb_first(&ctx->flexible_groups.tree),
+ typeof(*event), group_node);
+ }
+
+ return event;
}
static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
@@ -3808,9 +3820,9 @@ static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
perf_pmu_disable(cpuctx->ctx.pmu);
if (task_rotate)
- task_event = ctx_first_active(task_ctx);
+ task_event = ctx_event_to_rotate(task_ctx);
if (cpu_rotate)
- cpu_event = ctx_first_active(&cpuctx->ctx);
+ cpu_event = ctx_event_to_rotate(&cpuctx->ctx);
/*
* As per the order given at ctx_resched() first 'pop' task flexible
@@ -5668,7 +5680,8 @@ again:
* undo the VM accounting.
*/
- atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
+ atomic_long_sub((size >> PAGE_SHIFT) + 1 - mmap_locked,
+ &mmap_user->locked_vm);
atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm);
free_uid(mmap_user);
@@ -5812,8 +5825,20 @@ accounting:
user_locked = atomic_long_read(&user->locked_vm) + user_extra;
- if (user_locked > user_lock_limit)
+ if (user_locked <= user_lock_limit) {
+ /* charge all to locked_vm */
+ } else if (atomic_long_read(&user->locked_vm) >= user_lock_limit) {
+ /* charge all to pinned_vm */
+ extra = user_extra;
+ user_extra = 0;
+ } else {
+ /*
+ * charge locked_vm until it hits user_lock_limit;
+ * charge the rest from pinned_vm
+ */
extra = user_locked - user_lock_limit;
+ user_extra -= extra;
+ }
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
@@ -11862,6 +11887,10 @@ static int inherit_group(struct perf_event *parent_event,
child, leader, child_ctx);
if (IS_ERR(child_ctr))
return PTR_ERR(child_ctr);
+
+ if (sub->aux_event == parent_event &&
+ !perf_get_aux_event(child_ctr, leader))
+ return -EINVAL;
}
return 0;
}
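The rewritten perf_mmap accounting above splits a new charge three ways: entirely into locked_vm while under the per-user limit, entirely into pinned_vm once the limit is already exhausted, and otherwise filling locked_vm up to the limit with only the overflow going to pinned_vm. The matching unmap hunk then subtracts mmap_locked before undoing the locked_vm charge, so the two sides stay balanced. A userspace model of the split (all names are local to the sketch):

struct charge {
	unsigned long locked;	/* portion charged to locked_vm */
	unsigned long pinned;	/* portion charged to pinned_vm */
};

static struct charge split_charge(unsigned long locked_vm,
				  unsigned long limit,
				  unsigned long extra)
{
	struct charge c = { .locked = extra, .pinned = 0 };

	if (locked_vm + extra <= limit) {
		/* fits entirely under the limit: all to locked_vm */
	} else if (locked_vm >= limit) {
		/* limit already consumed: all to pinned_vm */
		c.locked = 0;
		c.pinned = extra;
	} else {
		/* fill locked_vm up to the limit, rest to pinned_vm */
		c.pinned = locked_vm + extra - limit;
		c.locked = extra - c.pinned;
	}
	return c;
}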
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 94d38a39d72e..c74761004ee5 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -474,14 +474,17 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
struct vm_area_struct *vma;
int ret, is_register, ref_ctr_updated = 0;
bool orig_page_huge = false;
+ unsigned int gup_flags = FOLL_FORCE;
is_register = is_swbp_insn(&opcode);
uprobe = container_of(auprobe, struct uprobe, arch);
retry:
+ if (is_register)
+ gup_flags |= FOLL_SPLIT_PMD;
/* Read the page with vaddr into memory */
- ret = get_user_pages_remote(NULL, mm, vaddr, 1,
- FOLL_FORCE | FOLL_SPLIT_PMD, &old_page, &vma, NULL);
+ ret = get_user_pages_remote(NULL, mm, vaddr, 1, gup_flags,
+ &old_page, &vma, NULL);
if (ret <= 0)
return ret;
@@ -489,6 +492,12 @@ retry:
if (ret <= 0)
goto put_old;
+ if (WARN(!is_register && PageCompound(old_page),
+ "uprobe unregister should never work on compound page\n")) {
+ ret = -EINVAL;
+ goto put_old;
+ }
+
/* We are going to replace instruction, update ref_ctr. */
if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
diff --git a/kernel/fork.c b/kernel/fork.c
index 1f6c45f6a734..bcdf53125210 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2925,7 +2925,7 @@ int sysctl_max_threads(struct ctl_table *table, int write,
struct ctl_table t;
int ret;
int threads = max_threads;
- int min = MIN_THREADS;
+ int min = 1;
int max = MAX_THREADS;
t = *table;
@@ -2937,7 +2937,7 @@ int sysctl_max_threads(struct ctl_table *table, int write,
if (ret || !write)
return ret;
- set_max_threads(threads);
+ max_threads = threads;
return 0;
}
diff --git a/kernel/freezer.c b/kernel/freezer.c
index c0738424bb43..dc520f01f99d 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -22,12 +22,6 @@ EXPORT_SYMBOL(system_freezing_cnt);
bool pm_freezing;
bool pm_nosig_freezing;
-/*
- * Temporary export for the deadlock workaround in ata_scsi_hotplug().
- * Remove once the hack becomes unnecessary.
- */
-EXPORT_SYMBOL_GPL(pm_freezing);
-
/* protects freezing and frozen transitions */
static DEFINE_SPINLOCK(freezer_lock);
diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh
index aff79e461fc9..5a0fc0b0403a 100755
--- a/kernel/gen_kheaders.sh
+++ b/kernel/gen_kheaders.sh
@@ -71,10 +71,13 @@ done | cpio --quiet -pd $cpio_dir >/dev/null 2>&1
find $cpio_dir -type f -print0 |
xargs -0 -P8 -n1 perl -pi -e 'BEGIN {undef $/;}; s/\/\*((?!SPDX).)*?\*\///smg;'
-# Create archive and try to normalize metadata for reproducibility
-tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
- --owner=0 --group=0 --sort=name --numeric-owner \
- -Jcf $tarfile -C $cpio_dir/ . > /dev/null
+# Create archive and try to normalize metadata for reproducibility.
+# For compatibility with older versions of tar, files are fed to tar
+# pre-sorted, as --sort=name might not be available.
+find $cpio_dir -printf "./%P\n" | LC_ALL=C sort | \
+ tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
+ --owner=0 --group=0 --numeric-owner --no-recursion \
+ -Jcf $tarfile -C $cpio_dir/ -T - > /dev/null
echo "$src_files_md5" > kernel/kheaders.md5
echo "$obj_files_md5" >> kernel/kheaders.md5
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 621467c33fef..b262f47046ca 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -866,9 +866,9 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
-void __kthread_queue_delayed_work(struct kthread_worker *worker,
- struct kthread_delayed_work *dwork,
- unsigned long delay)
+static void __kthread_queue_delayed_work(struct kthread_worker *worker,
+ struct kthread_delayed_work *dwork,
+ unsigned long delay)
{
struct timer_list *timer = &dwork->timer;
struct kthread_work *work = &dwork->work;
diff --git a/kernel/panic.c b/kernel/panic.c
index 47e8ebccc22b..f470a038b05b 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -180,6 +180,7 @@ void panic(const char *fmt, ...)
* after setting panic_cpu) from invoking panic() again.
*/
local_irq_disable();
+ preempt_disable_notrace();
/*
* It's possible to come here directly from a panic-assertion and
diff --git a/kernel/power/main.c b/kernel/power/main.c
index e8710d179b35..e26de7af520b 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -15,6 +15,7 @@
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/syscalls.h>
+#include <linux/pm_runtime.h>
#include "power.h"
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 2305ce89a26c..46ed4e1383e2 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -740,7 +740,7 @@ void vtime_account_system(struct task_struct *tsk)
write_seqcount_begin(&vtime->seqcount);
/* We might have scheduled out from guest path */
- if (current->flags & PF_VCPU)
+ if (tsk->flags & PF_VCPU)
vtime_account_guest(tsk, vtime);
else
__vtime_account_system(tsk, vtime);
@@ -783,7 +783,7 @@ void vtime_guest_enter(struct task_struct *tsk)
*/
write_seqcount_begin(&vtime->seqcount);
__vtime_account_system(tsk, vtime);
- current->flags |= PF_VCPU;
+ tsk->flags |= PF_VCPU;
write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);
@@ -794,7 +794,7 @@ void vtime_guest_exit(struct task_struct *tsk)
write_seqcount_begin(&vtime->seqcount);
vtime_account_guest(tsk, vtime);
- current->flags &= ~PF_VCPU;
+ tsk->flags &= ~PF_VCPU;
write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 83ab35e2374f..682a754ea3e1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4926,20 +4926,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
if (++count > 3) {
u64 new, old = ktime_to_ns(cfs_b->period);
- new = (old * 147) / 128; /* ~115% */
- new = min(new, max_cfs_quota_period);
-
- cfs_b->period = ns_to_ktime(new);
-
- /* since max is 1s, this is limited to 1e9^2, which fits in u64 */
- cfs_b->quota *= new;
- cfs_b->quota = div64_u64(cfs_b->quota, old);
-
- pr_warn_ratelimited(
- "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
- smp_processor_id(),
- div_u64(new, NSEC_PER_USEC),
- div_u64(cfs_b->quota, NSEC_PER_USEC));
+ /*
+ * Grow period by a factor of 2 to avoid losing precision.
+ * Precision loss in the quota/period ratio can cause __cfs_schedulable
+ * to fail.
+ */
+ new = old * 2;
+ if (new < max_cfs_quota_period) {
+ cfs_b->period = ns_to_ktime(new);
+ cfs_b->quota *= 2;
+
+ pr_warn_ratelimited(
+ "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n",
+ smp_processor_id(),
+ div_u64(new, NSEC_PER_USEC),
+ div_u64(cfs_b->quota, NSEC_PER_USEC));
+ } else {
+ pr_warn_ratelimited(
+ "cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n",
+ smp_processor_id(),
+ div_u64(old, NSEC_PER_USEC),
+ div_u64(cfs_b->quota, NSEC_PER_USEC));
+ }
/* reset count so we don't come right back in here */
count = 0;
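The switch from the ~115% factor to plain doubling is about integer arithmetic: rescaling the quota by new/old through div64_u64 truncates, so the quota/period ratio drifted slightly on every expansion, which can eventually make __cfs_schedulable fail. Doubling both values keeps the ratio bit-exact. A small standalone demonstration of the drift:

#include <stdio.h>

int main(void)
{
	unsigned long long period = 100000, quota = 33333;

	/* old style: ~115% growth, then rescale quota by new/old */
	unsigned long long p1 = period * 147 / 128;	/* 114843 */
	unsigned long long q1 = quota * p1 / period;	/* truncates */

	/* new style: doubling preserves quota/period exactly */
	unsigned long long p2 = period * 2, q2 = quota * 2;

	printf("old ratio %f, new ratio %f\n",
	       (double)q1 / p1, (double)q2 / p2);
	return 0;
}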
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index c7031a22aa7b..998d50ee2d9b 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -7,6 +7,7 @@
* Copyright (C) 2010 SUSE Linux Products GmbH
* Copyright (C) 2010 Tejun Heo <tj@kernel.org>
*/
+#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
@@ -167,7 +168,7 @@ static void set_state(struct multi_stop_data *msdata,
/* Reset ack counter. */
atomic_set(&msdata->thread_ack, msdata->num_threads);
smp_wmb();
- msdata->state = newstate;
+ WRITE_ONCE(msdata->state, newstate);
}
/* Last one to ack a state moves to the next state. */
@@ -186,7 +187,7 @@ void __weak stop_machine_yield(const struct cpumask *cpumask)
static int multi_cpu_stop(void *data)
{
struct multi_stop_data *msdata = data;
- enum multi_stop_state curstate = MULTI_STOP_NONE;
+ enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
int cpu = smp_processor_id(), err = 0;
const struct cpumask *cpumask;
unsigned long flags;
@@ -210,8 +211,9 @@ static int multi_cpu_stop(void *data)
do {
/* Chill out and ensure we re-read multi_stop_state. */
stop_machine_yield(cpumask);
- if (msdata->state != curstate) {
- curstate = msdata->state;
+ newstate = READ_ONCE(msdata->state);
+ if (newstate != curstate) {
+ curstate = newstate;
switch (curstate) {
case MULTI_STOP_DISABLE_IRQ:
local_irq_disable();
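The multi_cpu_stop change is the classic polling-loop case: nothing inside the loop writes msdata->state, so with a plain read the compiler may legally hoist the load and spin on a stale register copy; the READ_ONCE/WRITE_ONCE pair documents the cross-CPU race and forces a fresh load each pass. A sketch of the hazard, with a simplified stand-in macro:

#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

/* Another CPU advances *state; the reader must reload it every pass.
 * Without the volatile access this could become an infinite loop. */
static void wait_for_state(const int *state, int want)
{
	while (READ_ONCE(*state) != want)
		;	/* spin */
}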
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 00fcea236eba..b6f2f35d0bcf 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -163,7 +163,7 @@ static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
#ifdef CONFIG_SPARC
#endif
-#ifdef __hppa__
+#ifdef CONFIG_PARISC
extern int pwrsw_enabled;
#endif
@@ -620,7 +620,7 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec,
},
#endif
-#ifdef __hppa__
+#ifdef CONFIG_PARISC
{
.procname = "soft-power",
.data = &pwrsw_enabled,
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 0d4dc241c0fb..65605530ee34 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -164,7 +164,7 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
struct hrtimer_clock_base *base;
for (;;) {
- base = timer->base;
+ base = READ_ONCE(timer->base);
if (likely(base != &migration_base)) {
raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
if (likely(base == timer->base))
@@ -244,7 +244,7 @@ again:
return base;
/* See the comment in lock_hrtimer_base() */
- timer->base = &migration_base;
+ WRITE_ONCE(timer->base, &migration_base);
raw_spin_unlock(&base->cpu_base->lock);
raw_spin_lock(&new_base->cpu_base->lock);
@@ -253,10 +253,10 @@ again:
raw_spin_unlock(&new_base->cpu_base->lock);
raw_spin_lock(&base->cpu_base->lock);
new_cpu_base = this_cpu_base;
- timer->base = base;
+ WRITE_ONCE(timer->base, base);
goto again;
}
- timer->base = new_base;
+ WRITE_ONCE(timer->base, new_base);
} else {
if (new_cpu_base != this_cpu_base &&
hrtimer_check_target(timer, new_base)) {
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 62a50bf399d6..f296d89be757 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -18,6 +18,7 @@
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
+#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
@@ -3486,6 +3487,11 @@ static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
struct ftrace_iterator *iter;
+ int ret;
+
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
+ if (ret)
+ return ret;
if (unlikely(ftrace_disabled))
return -ENODEV;
@@ -3505,6 +3511,15 @@ ftrace_enabled_open(struct inode *inode, struct file *file)
{
struct ftrace_iterator *iter;
+ /*
+ * This shows us what functions are currently being
+ * traced and by what. Not sure if we want lockdown
+ * to hide such critical information for an admin.
+ * Although, perhaps it can show information we don't
+ * want people to see, but if something is tracing
+ * something, we probably want to know about it.
+ */
+
iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
if (!iter)
return -ENOMEM;
@@ -3540,21 +3555,22 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
struct ftrace_hash *hash;
struct list_head *mod_head;
struct trace_array *tr = ops->private;
- int ret = 0;
+ int ret = -ENOMEM;
ftrace_ops_init(ops);
if (unlikely(ftrace_disabled))
return -ENODEV;
+ if (tracing_check_open_get_tr(tr))
+ return -ENODEV;
+
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
- return -ENOMEM;
+ goto out;
- if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
- kfree(iter);
- return -ENOMEM;
- }
+ if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
+ goto out;
iter->ops = ops;
iter->flags = flag;
@@ -3584,13 +3600,13 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
if (!iter->hash) {
trace_parser_put(&iter->parser);
- kfree(iter);
- ret = -ENOMEM;
goto out_unlock;
}
} else
iter->hash = hash;
+ ret = 0;
+
if (file->f_mode & FMODE_READ) {
iter->pg = ftrace_pages_start;
@@ -3602,7 +3618,6 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
/* Failed */
free_ftrace_hash(iter->hash);
trace_parser_put(&iter->parser);
- kfree(iter);
}
} else
file->private_data = iter;
@@ -3610,6 +3625,13 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
out_unlock:
mutex_unlock(&ops->func_hash->regex_lock);
+ out:
+ if (ret) {
+ kfree(iter);
+ if (tr)
+ trace_array_put(tr);
+ }
+
return ret;
}
@@ -3618,6 +3640,7 @@ ftrace_filter_open(struct inode *inode, struct file *file)
{
struct ftrace_ops *ops = inode->i_private;
+ /* Checks for tracefs lockdown */
return ftrace_regex_open(ops,
FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
inode, file);
@@ -3628,6 +3651,7 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
{
struct ftrace_ops *ops = inode->i_private;
+ /* Checks for tracefs lockdown */
return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
inode, file);
}
@@ -5037,6 +5061,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
mutex_unlock(&iter->ops->func_hash->regex_lock);
free_ftrace_hash(iter->hash);
+ if (iter->tr)
+ trace_array_put(iter->tr);
kfree(iter);
return 0;
@@ -5194,9 +5220,13 @@ static int
__ftrace_graph_open(struct inode *inode, struct file *file,
struct ftrace_graph_data *fgd)
{
- int ret = 0;
+ int ret;
struct ftrace_hash *new_hash = NULL;
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
+ if (ret)
+ return ret;
+
if (file->f_mode & FMODE_WRITE) {
const int size_bits = FTRACE_HASH_DEFAULT_BITS;
@@ -6537,8 +6567,9 @@ ftrace_pid_open(struct inode *inode, struct file *file)
struct seq_file *m;
int ret = 0;
- if (trace_array_get(tr) < 0)
- return -ENODEV;
+ ret = tracing_check_open_get_tr(tr);
+ if (ret)
+ return ret;
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC))
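Nearly every tracing change that follows is the same transformation: each tracefs ->open() handler gains an early security_locked_down(LOCKDOWN_TRACEFS) check, either directly or via the new tracing_check_open_get_tr() helper, so the file refuses to open before any iterator state is allocated or a trace_array reference is taken. A userspace model of the gate's shape (the lockdown predicate below is a stand-in, not the kernel API):

#include <errno.h>
#include <stdbool.h>

static bool lockdown_active;	/* stand-in for kernel lockdown state */

static int security_locked_down_sketch(void)
{
	return lockdown_active ? -EPERM : 0;
}

static int example_open(void)
{
	int ret = security_locked_down_sketch();

	if (ret)
		return ret;	/* fail before allocating anything */

	/* ... take references, allocate iterator state, etc. ... */
	return 0;
}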
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 252f79c435f8..6a0ee9178365 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -17,6 +17,7 @@
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
+#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
@@ -304,6 +305,23 @@ void trace_array_put(struct trace_array *this_tr)
mutex_unlock(&trace_types_lock);
}
+int tracing_check_open_get_tr(struct trace_array *tr)
+{
+ int ret;
+
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
+ if (ret)
+ return ret;
+
+ if (tracing_disabled)
+ return -ENODEV;
+
+ if (tr && trace_array_get(tr) < 0)
+ return -ENODEV;
+
+ return 0;
+}
+
int call_filter_check_discard(struct trace_event_call *call, void *rec,
struct ring_buffer *buffer,
struct ring_buffer_event *event)
@@ -4140,8 +4158,11 @@ release:
int tracing_open_generic(struct inode *inode, struct file *filp)
{
- if (tracing_disabled)
- return -ENODEV;
+ int ret;
+
+ ret = tracing_check_open_get_tr(NULL);
+ if (ret)
+ return ret;
filp->private_data = inode->i_private;
return 0;
@@ -4156,15 +4177,14 @@ bool tracing_is_disabled(void)
* Open and update trace_array ref count.
* Must have the current trace_array passed to it.
*/
-static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
+int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
struct trace_array *tr = inode->i_private;
+ int ret;
- if (tracing_disabled)
- return -ENODEV;
-
- if (trace_array_get(tr) < 0)
- return -ENODEV;
+ ret = tracing_check_open_get_tr(tr);
+ if (ret)
+ return ret;
filp->private_data = inode->i_private;
@@ -4233,10 +4253,11 @@ static int tracing_open(struct inode *inode, struct file *file)
{
struct trace_array *tr = inode->i_private;
struct trace_iterator *iter;
- int ret = 0;
+ int ret;
- if (trace_array_get(tr) < 0)
- return -ENODEV;
+ ret = tracing_check_open_get_tr(tr);
+ if (ret)
+ return ret;
/* If this file was open for write, then erase contents */
if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
@@ -4352,12 +4373,15 @@ static int show_traces_open(struct inode *inode, struct file *file)
struct seq_file *m;
int ret;
- if (tracing_disabled)
- return -ENODEV;
+ ret = tracing_check_open_get_tr(tr);
+ if (ret)
+ return ret;
ret = seq_open(file, &show_traces_seq_ops);
- if (ret)
+ if (ret) {
+ trace_array_put(tr);
return ret;
+ }
m = file->private_data;
m->private = tr;
@@ -4365,6 +4389,14 @@ static int show_traces_open(struct inode *inode, struct file *file)
return 0;
}
+static int show_traces_release(struct inode *inode, struct file *file)
+{
+ struct trace_array *tr = inode->i_private;
+
+ trace_array_put(tr);
+ return seq_release(inode, file);
+}
+
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
size_t count, loff_t *ppos)
@@ -4395,8 +4427,8 @@ static const struct file_operations tracing_fops = {
static const struct file_operations show_traces_fops = {
.open = show_traces_open,
.read = seq_read,
- .release = seq_release,
.llseek = seq_lseek,
+ .release = show_traces_release,
};
static ssize_t
@@ -4697,11 +4729,9 @@ static int tracing_trace_options_open(struct inode *inode, struct file *file)
struct trace_array *tr = inode->i_private;
int ret;
- if (tracing_disabled)
- return -ENODEV;
-
- if (trace_array_get(tr) < 0)
- return -ENODEV;
+ ret = tracing_check_open_get_tr(tr);
+ if (ret)
+ return ret;
ret = single_open(file, tracing_trace_options_show, inode->i_private);
if (ret < 0)
@@ -5038,8 +5068,11 @@ static const struct seq_operations tracing_saved_tgids_seq_ops = {
static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
{
- if (tracing_disabled)
- return -ENODEV;
+ int ret;
+
+ ret = tracing_check_open_get_tr(NULL);
+ if (ret)
+ return ret;
return seq_open(filp, &tracing_saved_tgids_seq_ops);
}
@@ -5115,8 +5148,11 @@ static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
- if (tracing_disabled)
- return -ENODEV;
+ int ret;
+
+ ret = tracing_check_open_get_tr(NULL);
+ if (ret)
+ return ret;
return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}
@@ -5280,8 +5316,11 @@ static const struct seq_operations tracing_eval_map_seq_ops = {
static int tracing_eval_map_open(struct inode *inode, struct file *filp)
{
- if (tracing_disabled)
- return -ENODEV;
+ int ret;
+
+ ret = tracing_check_open_get_tr(NULL);
+ if (ret)
+ return ret;
return seq_open(filp, &tracing_eval_map_seq_ops);
}
@@ -5804,13 +5843,11 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
struct trace_array *tr = inode->i_private;
struct trace_iterator *iter;
- int ret = 0;
-
- if (tracing_disabled)
- return -ENODEV;
+ int ret;
- if (trace_array_get(tr) < 0)
- return -ENODEV;
+ ret = tracing_check_open_get_tr(tr);
+ if (ret)
+ return ret;
mutex_lock(&trace_types_lock);
@@ -5999,6 +6036,7 @@ waitagain:
sizeof(struct trace_iterator) -
offsetof(struct trace_iterator, seq));
cpumask_clear(iter->started);
+ trace_seq_init(&iter->seq);
iter->pos = -1;
trace_event_read_lock();
@@ -6547,11 +6585,9 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
struct trace_array *tr = inode->i_private;
int ret;
- if (tracing_disabled)
- return -ENODEV;
-
- if (trace_array_get(tr))
- return -ENODEV;
+ ret = tracing_check_open_get_tr(tr);
+ if (ret)
+ return ret;
ret = single_open(file, tracing_clock_show, inode->i_private);
if (ret < 0)
@@ -6581,11 +6617,9 @@ static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
struct trace_array *tr = inode->i_private;
int ret;
- if (tracing_disabled)
- return -ENODEV;
-
- if (trace_array_get(tr))
- return -ENODEV;
+ ret = tracing_check_open_get_tr(tr);
+ if (ret)
+ return ret;
ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
if (ret < 0)
@@ -6638,10 +6672,11 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
struct trace_array *tr = inode->i_private;
struct trace_iterator *iter;
struct seq_file *m;
- int ret = 0;
+ int ret;
- if (trace_array_get(tr) < 0)
- return -ENODEV;
+ ret = tracing_check_open_get_tr(tr);
+ if (ret)
+ return ret;
if (file->f_mode & FMODE_READ) {
iter = __tracing_open(inode, file, true);
@@ -6786,6 +6821,7 @@ static int snapshot_raw_open(struct inode *inode, struct file *filp)
struct ftrace_buffer_info *info;
int ret;
+ /* The following checks for tracefs lockdown */
ret = tracing_buffers_open(inode, filp);
if (ret < 0)
return ret;
@@ -7105,8 +7141,9 @@ static int tracing_err_log_open(struct inode *inode, struct file *file)
struct trace_array *tr = inode->i_private;
int ret = 0;
- if (trace_array_get(tr) < 0)
- return -ENODEV;
+ ret = tracing_check_open_get_tr(tr);
+ if (ret)
+ return ret;
/* If this file was opened for write, then erase contents */
if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
@@ -7157,11 +7194,9 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
struct ftrace_buffer_info *info;
int ret;
- if (tracing_disabled)
- return -ENODEV;
-
- if (trace_array_get(tr) < 0)
- return -ENODEV;
+ ret = tracing_check_open_get_tr(tr);
+ if (ret)
+ return ret;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f801d154ff6a..d685c61085c0 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -338,6 +338,7 @@ extern struct mutex trace_types_lock;
extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);
+extern int tracing_check_open_get_tr(struct trace_array *tr);
extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
@@ -681,6 +682,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
+int tracing_open_generic_tr(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
bool tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c
index a41fed46c285..89779eb84a07 100644
--- a/kernel/trace/trace_dynevent.c
+++ b/kernel/trace/trace_dynevent.c
@@ -174,6 +174,10 @@ static int dyn_event_open(struct inode *inode, struct file *file)
{
int ret;
+ ret = tracing_check_open_get_tr(NULL);
+ if (ret)
+ return ret;
+
if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
ret = dyn_events_release_all(NULL);
if (ret < 0)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index b89cdfe20bc1..fba87d10f0c1 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -12,6 +12,7 @@
#define pr_fmt(fmt) fmt
#include <linux/workqueue.h>
+#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
@@ -1294,6 +1295,8 @@ static int trace_format_open(struct inode *inode, struct file *file)
struct seq_file *m;
int ret;
+ /* Do we want to hide event format files on tracefs lockdown? */
+
ret = seq_open(file, &trace_format_seq_ops);
if (ret < 0)
return ret;
@@ -1440,28 +1443,17 @@ static int system_tr_open(struct inode *inode, struct file *filp)
struct trace_array *tr = inode->i_private;
int ret;
- if (tracing_is_disabled())
- return -ENODEV;
-
- if (trace_array_get(tr) < 0)
- return -ENODEV;
-
/* Make a temporary dir that has no system but points to tr */
dir = kzalloc(sizeof(*dir), GFP_KERNEL);
- if (!dir) {
- trace_array_put(tr);
+ if (!dir)
return -ENOMEM;
- }
- dir->tr = tr;
-
- ret = tracing_open_generic(inode, filp);
+ ret = tracing_open_generic_tr(inode, filp);
if (ret < 0) {
- trace_array_put(tr);
kfree(dir);
return ret;
}
-
+ dir->tr = tr;
filp->private_data = dir;
return 0;
@@ -1771,6 +1763,10 @@ ftrace_event_open(struct inode *inode, struct file *file,
struct seq_file *m;
int ret;
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
+ if (ret)
+ return ret;
+
ret = seq_open(file, seq_ops);
if (ret < 0)
return ret;
@@ -1795,6 +1791,7 @@ ftrace_event_avail_open(struct inode *inode, struct file *file)
{
const struct seq_operations *seq_ops = &show_event_seq_ops;
+ /* Checks for tracefs lockdown */
return ftrace_event_open(inode, file, seq_ops);
}
@@ -1805,8 +1802,9 @@ ftrace_event_set_open(struct inode *inode, struct file *file)
struct trace_array *tr = inode->i_private;
int ret;
- if (trace_array_get(tr) < 0)
- return -ENODEV;
+ ret = tracing_check_open_get_tr(tr);
+ if (ret)
+ return ret;
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC))
@@ -1825,8 +1823,9 @@ ftrace_event_set_pid_open(struct inode *inode, struct file *file)
struct trace_array *tr = inode->i_private;
int ret;
- if (trace_array_get(tr) < 0)
- return -ENODEV;
+ ret = tracing_check_open_get_tr(tr);
+ if (ret)
+ return ret;
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC))
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 9468bd8d44a2..57648c5aa679 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/kallsyms.h>
+#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
@@ -1448,6 +1449,10 @@ static int synth_events_open(struct inode *inode, struct file *file)
{
int ret;
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
+ if (ret)
+ return ret;
+
if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
ret = dyn_events_release_all(&synth_event_ops);
if (ret < 0)
@@ -1680,7 +1685,7 @@ static int save_hist_vars(struct hist_trigger_data *hist_data)
if (var_data)
return 0;
- if (trace_array_get(tr) < 0)
+ if (tracing_check_open_get_tr(tr))
return -ENODEV;
var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
@@ -5515,6 +5520,12 @@ static int hist_show(struct seq_file *m, void *v)
static int event_hist_open(struct inode *inode, struct file *file)
{
+ int ret;
+
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
+ if (ret)
+ return ret;
+
return single_open(file, hist_show, file);
}
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 2a2912cb4533..2cd53ca21b51 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -5,6 +5,7 @@
* Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
*/
+#include <linux/security.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
@@ -173,7 +174,11 @@ static const struct seq_operations event_triggers_seq_ops = {
static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
- int ret = 0;
+ int ret;
+
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
+ if (ret)
+ return ret;
mutex_lock(&event_mutex);
@@ -292,6 +297,7 @@ event_trigger_write(struct file *filp, const char __user *ubuf,
static int
event_trigger_open(struct inode *inode, struct file *filp)
{
+ /* Checks for tracefs lockdown */
return event_trigger_regex_open(inode, filp);
}
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index fa95139445b2..862f4b0139fc 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -150,7 +150,7 @@ void trace_hwlat_callback(bool enter)
if (enter)
nmi_ts_start = time_get();
else
- nmi_total_ts = time_get() - nmi_ts_start;
+ nmi_total_ts += time_get() - nmi_ts_start;
}
if (enter)
@@ -256,6 +256,8 @@ static int get_sample(void)
/* Keep a running maximum ever recorded hardware latency */
if (sample > tr->max_latency)
tr->max_latency = sample;
+ if (outer_sample > tr->max_latency)
+ tr->max_latency = outer_sample;
}
out:
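The one-character hwlat fix matters because several NMIs can land inside a single sampling window; with '=', each NMI's duration overwrote the previous one instead of adding to the running total, and the second hunk additionally lets the outer sample update the tracked maximum. A small model of the accumulation fix:

static unsigned long long nmi_total_ts, nmi_ts_start;

static void nmi_callback(int enter, unsigned long long now)
{
	if (enter)
		nmi_ts_start = now;
	else
		nmi_total_ts += now - nmi_ts_start;	/* '=' dropped
							 * earlier NMIs */
}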
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 324ffbea3556..1552a95c743b 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -7,11 +7,11 @@
*/
#define pr_fmt(fmt) "trace_kprobe: " fmt
+#include <linux/security.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>
#include <linux/error-injection.h>
-#include <linux/security.h>
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
@@ -936,6 +936,10 @@ static int probes_open(struct inode *inode, struct file *file)
{
int ret;
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
+ if (ret)
+ return ret;
+
if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
ret = dyn_events_release_all(&trace_kprobe_ops);
if (ret < 0)
@@ -988,6 +992,12 @@ static const struct seq_operations profile_seq_op = {
static int profile_open(struct inode *inode, struct file *file)
{
+ int ret;
+
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
+ if (ret)
+ return ret;
+
return seq_open(file, &profile_seq_op);
}
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index c3fd849d4a8f..d4e31e969206 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -6,6 +6,7 @@
*
*/
#include <linux/seq_file.h>
+#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
@@ -348,6 +349,12 @@ static const struct seq_operations show_format_seq_ops = {
static int
ftrace_formats_open(struct inode *inode, struct file *file)
{
+ int ret;
+
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
+ if (ret)
+ return ret;
+
return seq_open(file, &show_format_seq_ops);
}
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index ec9a34a97129..4df9a209f7ca 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -5,6 +5,7 @@
*/
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
+#include <linux/security.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
@@ -470,6 +471,12 @@ static const struct seq_operations stack_trace_seq_ops = {
static int stack_trace_open(struct inode *inode, struct file *file)
{
+ int ret;
+
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
+ if (ret)
+ return ret;
+
return seq_open(file, &stack_trace_seq_ops);
}
@@ -487,6 +494,7 @@ stack_trace_filter_open(struct inode *inode, struct file *file)
{
struct ftrace_ops *ops = inode->i_private;
+ /* Checks for tracefs lockdown */
return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
inode, file);
}
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index 75bf1bcb4a8a..9ab0a1a7ad5e 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -9,7 +9,7 @@
*
*/
-
+#include <linux/security.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
@@ -238,6 +238,10 @@ static int tracing_stat_open(struct inode *inode, struct file *file)
struct seq_file *m;
struct stat_session *session = inode->i_private;
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
+ if (ret)
+ return ret;
+
ret = stat_seq_init(session);
if (ret)
return ret;
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index dd884341f5c5..352073d36585 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -7,6 +7,7 @@
*/
#define pr_fmt(fmt) "trace_uprobe: " fmt
+#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/uaccess.h>
@@ -769,6 +770,10 @@ static int probes_open(struct inode *inode, struct file *file)
{
int ret;
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
+ if (ret)
+ return ret;
+
if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
ret = dyn_events_release_all(&trace_uprobe_ops);
if (ret)
@@ -818,6 +823,12 @@ static const struct seq_operations profile_seq_op = {
static int profile_open(struct inode *inode, struct file *file)
{
+ int ret;
+
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
+ if (ret)
+ return ret;
+
return seq_open(file, &profile_seq_op);
}
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
index ae25e2fa2187..f25eb111c051 100644
--- a/lib/generic-radix-tree.c
+++ b/lib/generic-radix-tree.c
@@ -2,6 +2,7 @@
#include <linux/export.h>
#include <linux/generic-radix-tree.h>
#include <linux/gfp.h>
+#include <linux/kmemleak.h>
#define GENRADIX_ARY (PAGE_SIZE / sizeof(struct genradix_node *))
#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY)
@@ -75,6 +76,27 @@ void *__genradix_ptr(struct __genradix *radix, size_t offset)
}
EXPORT_SYMBOL(__genradix_ptr);
+static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
+{
+ struct genradix_node *node;
+
+ node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO);
+
+ /*
+ * We're using pages (not slab allocations) directly for kernel data
+ * structures, so we need to explicitly inform kmemleak of them in order
+ * to avoid false positive memory leak reports.
+ */
+ kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask);
+ return node;
+}
+
+static inline void genradix_free_node(struct genradix_node *node)
+{
+ kmemleak_free(node);
+ free_page((unsigned long)node);
+}
+
/*
* Returns pointer to the specified byte @offset within @radix, allocating it if
* necessary - newly allocated slots are always zeroed out:
@@ -97,8 +119,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
break;
if (!new_node) {
- new_node = (void *)
- __get_free_page(gfp_mask|__GFP_ZERO);
+ new_node = genradix_alloc_node(gfp_mask);
if (!new_node)
return NULL;
}
@@ -121,8 +142,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
n = READ_ONCE(*p);
if (!n) {
if (!new_node) {
- new_node = (void *)
- __get_free_page(gfp_mask|__GFP_ZERO);
+ new_node = genradix_alloc_node(gfp_mask);
if (!new_node)
return NULL;
}
@@ -133,7 +153,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
}
if (new_node)
- free_page((unsigned long) new_node);
+ genradix_free_node(new_node);
return &n->data[offset];
}
@@ -191,7 +211,7 @@ static void genradix_free_recurse(struct genradix_node *n, unsigned level)
genradix_free_recurse(n->children[i], level - 1);
}
- free_page((unsigned long) n);
+ genradix_free_node(n);
}
int __genradix_prealloc(struct __genradix *radix, size_t size,
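The genradix change exists because kmemleak only instruments the slab allocators automatically; memory taken straight from the page allocator is invisible to it, so pointers stored only inside such pages look like leaks. Wrapping the page in kmemleak_alloc()/kmemleak_free() pairs, as the new helpers do, is the general cure. A sketch of the pairing in kernel context (kernel-only APIs, so not standalone-compilable):

/* allocate: page-backed object, announced to kmemleak by hand */
void *obj = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
if (obj)
	kmemleak_alloc(obj, PAGE_SIZE, 1, GFP_KERNEL);

/* free: retract from kmemleak before returning the page */
kmemleak_free(obj);
free_page((unsigned long)obj);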
diff --git a/lib/string.c b/lib/string.c
index cd7a10c19210..08ec58cc673b 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -748,27 +748,6 @@ void *memset(void *s, int c, size_t count)
EXPORT_SYMBOL(memset);
#endif
-/**
- * memzero_explicit - Fill a region of memory (e.g. sensitive
- * keying data) with 0s.
- * @s: Pointer to the start of the area.
- * @count: The size of the area.
- *
- * Note: usually using memset() is just fine (!), but in cases
- * where clearing out _local_ data at the end of a scope is
- * necessary, memzero_explicit() should be used instead in
- * order to prevent the compiler from optimising away zeroing.
- *
- * memzero_explicit() doesn't need an arch-specific version as
- * it just invokes the one of memset() implicitly.
- */
-void memzero_explicit(void *s, size_t count)
-{
- memset(s, 0, count);
- barrier_data(s);
-}
-EXPORT_SYMBOL(memzero_explicit);
-
#ifndef __HAVE_ARCH_MEMSET16
/**
* memset16() - Fill a memory area with a uint16_t
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index 9729f271d150..9742e5cb853a 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -297,6 +297,32 @@ out:
return 1;
}
+static int __init do_kmem_cache_size_bulk(int size, int *total_failures)
+{
+ struct kmem_cache *c;
+ int i, iter, maxiter = 1024;
+ int num, bytes;
+ bool fail = false;
+ void *objects[10];
+
+ c = kmem_cache_create("test_cache", size, size, 0, NULL);
+ for (iter = 0; (iter < maxiter) && !fail; iter++) {
+ num = kmem_cache_alloc_bulk(c, GFP_KERNEL, ARRAY_SIZE(objects),
+ objects);
+ for (i = 0; i < num; i++) {
+ bytes = count_nonzero_bytes(objects[i], size);
+ if (bytes)
+ fail = true;
+ fill_with_garbage(objects[i], size);
+ }
+
+ if (num)
+ kmem_cache_free_bulk(c, num, objects);
+ }
+ *total_failures += fail;
+ return 1;
+}
+
/*
* Test kmem_cache allocation by creating caches of different sizes, with and
* without constructors, with and without SLAB_TYPESAFE_BY_RCU.
@@ -318,6 +344,7 @@ static int __init test_kmemcache(int *total_failures)
num_tests += do_kmem_cache_size(size, ctor, rcu, zero,
&failures);
}
+ num_tests += do_kmem_cache_size_bulk(size, &failures);
}
REPORT_FAILURES_IN_FN();
*total_failures += failures;
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index e365ace06538..5ff04d8fe971 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -47,18 +47,35 @@ static bool is_zeroed(void *from, size_t size)
static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
{
int ret = 0;
- size_t start, end, i;
- size_t zero_start = size / 4;
- size_t zero_end = size - zero_start;
+ size_t start, end, i, zero_start, zero_end;
+
+ if (test(size < 2 * PAGE_SIZE, "buffer too small"))
+ return -EINVAL;
+
+ /*
+ * We want to cross a page boundary to exercise the code more
+ * effectively. We also don't want to make the size we scan too large,
+ * otherwise the test can take a long time and cause soft lockups. So
+ * scan a 1024 byte region across the page boundary.
+ */
+ size = 1024;
+ start = PAGE_SIZE - (size / 2);
+
+ kmem += start;
+ umem += start;
+
+ zero_start = size / 4;
+ zero_end = size - zero_start;
/*
- * We conduct a series of check_nonzero_user() tests on a block of memory
- * with the following byte-pattern (trying every possible [start,end]
- * pair):
+ * We conduct a series of check_nonzero_user() tests on a block of
+ * memory with the following byte-pattern (trying every possible
+ * [start,end] pair):
*
* [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
*
- * And we verify that check_nonzero_user() acts identically to memchr_inv().
+ * And we verify that check_nonzero_user() acts identically to
+ * memchr_inv().
*/
memset(kmem, 0x0, size);
@@ -93,11 +110,13 @@ static int test_copy_struct_from_user(char *kmem, char __user *umem,
size_t ksize, usize;
umem_src = kmalloc(size, GFP_KERNEL);
- if ((ret |= test(umem_src == NULL, "kmalloc failed")))
+ ret = test(umem_src == NULL, "kmalloc failed");
+ if (ret)
goto out_free;
expected = kmalloc(size, GFP_KERNEL);
- if ((ret |= test(expected == NULL, "kmalloc failed")))
+ ret = test(expected == NULL, "kmalloc failed");
+ if (ret)
goto out_free;
/* Fill umem with a fixed byte pattern. */
diff --git a/lib/vdso/Kconfig b/lib/vdso/Kconfig
index cc00364bd2c2..9fe698ff62ec 100644
--- a/lib/vdso/Kconfig
+++ b/lib/vdso/Kconfig
@@ -24,13 +24,4 @@ config GENERIC_COMPAT_VDSO
help
This config option enables the compat VDSO layer.
-config CROSS_COMPILE_COMPAT_VDSO
- string "32 bit Toolchain prefix for compat vDSO"
- default ""
- depends on GENERIC_COMPAT_VDSO
- help
- Defines the cross-compiler prefix for compiling compat vDSO.
- If a 64 bit compiler (i.e. x86_64) can compile the VDSO for
- 32 bit, it does not need to define this parameter.
-
endif
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index d9daa3e422d0..c360f6a6c844 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -239,8 +239,8 @@ static int __init default_bdi_init(void)
{
int err;
- bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
- WQ_UNBOUND | WQ_SYSFS, 0);
+ bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
+ WQ_SYSFS, 0);
if (!bdi_wq)
return -ENOMEM;
diff --git a/mm/compaction.c b/mm/compaction.c
index ce08b39d85d4..672d3c78c6ab 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -270,14 +270,15 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
/* Ensure the start of the pageblock or zone is online and valid */
block_pfn = pageblock_start_pfn(pfn);
- block_page = pfn_to_online_page(max(block_pfn, zone->zone_start_pfn));
+ block_pfn = max(block_pfn, zone->zone_start_pfn);
+ block_page = pfn_to_online_page(block_pfn);
if (block_page) {
page = block_page;
pfn = block_pfn;
}
/* Ensure the end of the pageblock or zone is online and valid */
- block_pfn += pageblock_nr_pages;
+ block_pfn = pageblock_end_pfn(pfn) - 1;
block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
end_page = pfn_to_online_page(block_pfn);
if (!end_page)
@@ -303,7 +304,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
page += (1 << PAGE_ALLOC_COSTLY_ORDER);
pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
- } while (page < end_page);
+ } while (page <= end_page);
return false;
}
diff --git a/mm/filemap.c b/mm/filemap.c
index 1146fcfa3215..85b7d087eb45 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -40,6 +40,7 @@
#include <linux/rmap.h>
#include <linux/delayacct.h>
#include <linux/psi.h>
+#include <linux/ramfs.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
diff --git a/mm/gup.c b/mm/gup.c
index 23a9f9c9d377..8f236a335ae9 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1973,7 +1973,8 @@ static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
}
static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
+ unsigned long end, unsigned int flags,
+ struct page **pages, int *nr)
{
unsigned long pte_end;
struct page *head, *page;
@@ -1986,7 +1987,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
pte = READ_ONCE(*ptep);
- if (!pte_access_permitted(pte, write))
+ if (!pte_access_permitted(pte, flags & FOLL_WRITE))
return 0;
/* hugepages are never "special" */
@@ -2023,7 +2024,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
}
static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
- unsigned int pdshift, unsigned long end, int write,
+ unsigned int pdshift, unsigned long end, unsigned int flags,
struct page **pages, int *nr)
{
pte_t *ptep;
@@ -2033,7 +2034,7 @@ static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
ptep = hugepte_offset(hugepd, addr, pdshift);
do {
next = hugepte_addr_end(addr, end, sz);
- if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
+ if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
return 0;
} while (ptep++, addr = next, addr != end);
@@ -2041,7 +2042,7 @@ static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
}
#else
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
- unsigned pdshift, unsigned long end, int write,
+ unsigned int pdshift, unsigned long end, unsigned int flags,
struct page **pages, int *nr)
{
return 0;
@@ -2049,7 +2050,8 @@ static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
#endif /* CONFIG_ARCH_HAS_HUGEPD */
static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
- unsigned long end, unsigned int flags, struct page **pages, int *nr)
+ unsigned long end, unsigned int flags,
+ struct page **pages, int *nr)
{
struct page *head, *page;
int refs;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c5cb6dcd6c69..13cc93785006 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2789,8 +2789,13 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
ds_queue->split_queue_len--;
list_del(page_deferred_list(head));
}
- if (mapping)
- __dec_node_page_state(page, NR_SHMEM_THPS);
+ if (mapping) {
+ if (PageSwapBacked(page))
+ __dec_node_page_state(page, NR_SHMEM_THPS);
+ else
+ __dec_node_page_state(page, NR_FILE_THPS);
+ }
+
spin_unlock(&ds_queue->split_queue_lock);
__split_huge_page(page, list, end, flags);
if (PageSwapCache(head)) {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ef37c85423a5..b45a95363a84 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1084,11 +1084,10 @@ static bool pfn_range_valid_gigantic(struct zone *z,
struct page *page;
for (i = start_pfn; i < end_pfn; i++) {
- if (!pfn_valid(i))
+ page = pfn_to_online_page(i);
+ if (!page)
return false;
- page = pfn_to_page(i);
-
if (page_zone(page) != z)
return false;
diff --git a/mm/init-mm.c b/mm/init-mm.c
index fb1e15028ef0..19603302a77f 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -5,6 +5,7 @@
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/cpumask.h>
+#include <linux/mman.h>
#include <linux/atomic.h>
#include <linux/user_namespace.h>
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 03a8d84badad..244607663363 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -527,6 +527,16 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
}
/*
+ * Remove an object from the object_tree_root and object_list. Must be called
+ * with the kmemleak_lock held _if_ kmemleak is still enabled.
+ */
+static void __remove_object(struct kmemleak_object *object)
+{
+ rb_erase(&object->rb_node, &object_tree_root);
+ list_del_rcu(&object->object_list);
+}
+
+/*
* Look up an object in the object search tree and remove it from both
* object_tree_root and object_list. The returned object's use_count should be
* at least 1, as initially set by create_object().
@@ -538,10 +548,8 @@ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int ali
write_lock_irqsave(&kmemleak_lock, flags);
object = lookup_object(ptr, alias);
- if (object) {
- rb_erase(&object->rb_node, &object_tree_root);
- list_del_rcu(&object->object_list);
- }
+ if (object)
+ __remove_object(object);
write_unlock_irqrestore(&kmemleak_lock, flags);
return object;
@@ -1834,12 +1842,16 @@ static const struct file_operations kmemleak_fops = {
static void __kmemleak_do_cleanup(void)
{
- struct kmemleak_object *object;
+ struct kmemleak_object *object, *tmp;
- rcu_read_lock();
- list_for_each_entry_rcu(object, &object_list, object_list)
- delete_object_full(object->pointer);
- rcu_read_unlock();
+ /*
+ * Kmemleak has already been disabled, no need for RCU list traversal
+ * or kmemleak_lock held.
+ */
+ list_for_each_entry_safe(object, tmp, &object_list, object_list) {
+ __remove_object(object);
+ __delete_object(object);
+ }
}
/*
diff --git a/mm/memblock.c b/mm/memblock.c
index 7d4f61ae666a..c4b16cae2bc9 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1356,9 +1356,6 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
align = SMP_CACHE_BYTES;
}
- if (end > memblock.current_limit)
- end = memblock.current_limit;
-
again:
found = memblock_find_in_range_node(size, align, start, end, nid,
flags);
@@ -1469,6 +1466,9 @@ static void * __init memblock_alloc_internal(
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, nid);
+ if (max_addr > memblock.current_limit)
+ max_addr = memblock.current_limit;
+
alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid);
/* retry allocation without lower limit */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c313c49074ca..363106578876 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1567,6 +1567,11 @@ unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
return max;
}
+unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
+{
+ return page_counter_read(&memcg->memory);
+}
+
static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
int order)
{
@@ -5415,6 +5420,8 @@ static int mem_cgroup_move_account(struct page *page,
struct mem_cgroup *from,
struct mem_cgroup *to)
{
+ struct lruvec *from_vec, *to_vec;
+ struct pglist_data *pgdat;
unsigned long flags;
unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
int ret;
@@ -5438,11 +5445,15 @@ static int mem_cgroup_move_account(struct page *page,
anon = PageAnon(page);
+ pgdat = page_pgdat(page);
+ from_vec = mem_cgroup_lruvec(pgdat, from);
+ to_vec = mem_cgroup_lruvec(pgdat, to);
+
spin_lock_irqsave(&from->move_lock, flags);
if (!anon && page_mapped(page)) {
- __mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages);
- __mod_memcg_state(to, NR_FILE_MAPPED, nr_pages);
+ __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
+ __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
}
/*
@@ -5454,14 +5465,14 @@ static int mem_cgroup_move_account(struct page *page,
struct address_space *mapping = page_mapping(page);
if (mapping_cap_account_dirty(mapping)) {
- __mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages);
- __mod_memcg_state(to, NR_FILE_DIRTY, nr_pages);
+ __mod_lruvec_state(from_vec, NR_FILE_DIRTY, -nr_pages);
+ __mod_lruvec_state(to_vec, NR_FILE_DIRTY, nr_pages);
}
}
if (PageWriteback(page)) {
- __mod_memcg_state(from, NR_WRITEBACK, -nr_pages);
- __mod_memcg_state(to, NR_WRITEBACK, nr_pages);
+ __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
+ __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 7ef849da8278..3151c87dff73 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -199,7 +199,6 @@ struct to_kill {
struct task_struct *tsk;
unsigned long addr;
short size_shift;
- char addr_valid;
};
/*
@@ -324,22 +323,27 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
}
}
tk->addr = page_address_in_vma(p, vma);
- tk->addr_valid = 1;
if (is_zone_device_page(p))
tk->size_shift = dev_pagemap_mapping_shift(p, vma);
else
tk->size_shift = compound_order(compound_head(p)) + PAGE_SHIFT;
/*
- * In theory we don't have to kill when the page was
- * munmaped. But it could be also a mremap. Since that's
- * likely very rare kill anyways just out of paranoia, but use
- * a SIGKILL because the error is not contained anymore.
+	 * Send SIGKILL if "tk->addr == -EFAULT". Also, since
+	 * "tk->size_shift" is always non-zero for !is_zone_device_page(),
+	 * "tk->size_shift == 0" effectively checks for no mapping on
+	 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
+ * to a process' address space, it's possible not all N VMAs
+ * contain mappings for the page, but at least one VMA does.
+ * Only deliver SIGBUS with payload derived from the VMA that
+ * has a mapping for the page.
*/
- if (tk->addr == -EFAULT || tk->size_shift == 0) {
+ if (tk->addr == -EFAULT) {
pr_info("Memory failure: Unable to find user space address %lx in %s\n",
page_to_pfn(p), tsk->comm);
- tk->addr_valid = 0;
+ } else if (tk->size_shift == 0) {
+ kfree(tk);
+ return;
}
get_task_struct(tsk);
tk->tsk = tsk;
@@ -366,7 +370,7 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
* make sure the process doesn't catch the
* signal and then access the memory. Just kill it.
*/
- if (fail || tk->addr_valid == 0) {
+ if (fail || tk->addr == -EFAULT) {
pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
pfn, tk->tsk->comm, tk->tsk->pid);
do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
@@ -1253,17 +1257,19 @@ int memory_failure(unsigned long pfn, int flags)
if (!sysctl_memory_failure_recovery)
panic("Memory failure on page %lx", pfn);
- if (!pfn_valid(pfn)) {
+ p = pfn_to_online_page(pfn);
+ if (!p) {
+ if (pfn_valid(pfn)) {
+ pgmap = get_dev_pagemap(pfn, NULL);
+ if (pgmap)
+ return memory_failure_dev_pagemap(pfn, flags,
+ pgmap);
+ }
pr_err("Memory failure: %#lx: memory outside kernel control\n",
pfn);
return -ENXIO;
}
- pgmap = get_dev_pagemap(pfn, NULL);
- if (pgmap)
- return memory_failure_dev_pagemap(pfn, flags, pgmap);
-
- p = pfn_to_page(pfn);
if (PageHuge(p))
return memory_failure_hugetlb(pfn, flags);
if (TestSetPageHWPoison(p)) {
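With the reordered checks, memory_failure() classifies the pfn before it ever touches a memmap: online pages take the normal path, valid-but-offline pfns are only consulted through their dev_pagemap, and everything else is rejected with -ENXIO. A compact sketch of that decision cascade, using stand-in predicates rather than the real kernel helpers:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for pfn_to_online_page(), pfn_valid() and
 * get_dev_pagemap(); the ranges are invented. */
static bool pfn_online(unsigned long pfn) { return pfn < 0x1000; }
static bool pfn_is_valid(unsigned long pfn) { return pfn < 0x2000; }
static bool pfn_devmem(unsigned long pfn) { return pfn >= 0x1000; }

/* Mirrors the reordered memory_failure() entry checks. */
static int classify(unsigned long pfn)
{
	if (pfn_online(pfn))
		return 0;		/* regular memmap handling */
	if (pfn_is_valid(pfn) && pfn_devmem(pfn))
		return 1;		/* memory_failure_dev_pagemap() */
	return -6;			/* -ENXIO: outside kernel control */
}

int main(void)
{
	printf("%d %d %d\n", classify(0x10), classify(0x1800),
	       classify(0x4000));
	return 0;
}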
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b1be791f772d..df570e5c71cc 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -436,67 +436,25 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
zone_span_writeunlock(zone);
}
-static void shrink_pgdat_span(struct pglist_data *pgdat,
- unsigned long start_pfn, unsigned long end_pfn)
+static void update_pgdat_span(struct pglist_data *pgdat)
{
- unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
- unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
- unsigned long pgdat_end_pfn = p;
- unsigned long pfn;
- int nid = pgdat->node_id;
-
- if (pgdat_start_pfn == start_pfn) {
- /*
- * If the section is smallest section in the pgdat, it need
- * shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
- * In this case, we find second smallest valid mem_section
- * for shrinking zone.
- */
- pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
- pgdat_end_pfn);
- if (pfn) {
- pgdat->node_start_pfn = pfn;
- pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
- }
- } else if (pgdat_end_pfn == end_pfn) {
- /*
- * If the section is biggest section in the pgdat, it need
- * shrink pgdat->node_spanned_pages.
- * In this case, we find second biggest valid mem_section for
- * shrinking zone.
- */
- pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
- start_pfn);
- if (pfn)
- pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
- }
-
- /*
- * If the section is not biggest or smallest mem_section in the pgdat,
- * it only creates a hole in the pgdat. So in this case, we need not
- * change the pgdat.
- * But perhaps, the pgdat has only hole data. Thus it check the pgdat
- * has only hole or not.
- */
- pfn = pgdat_start_pfn;
- for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SUBSECTION) {
- if (unlikely(!pfn_valid(pfn)))
- continue;
-
- if (pfn_to_nid(pfn) != nid)
- continue;
+ unsigned long node_start_pfn = 0, node_end_pfn = 0;
+ struct zone *zone;
- /* Skip range to be removed */
- if (pfn >= start_pfn && pfn < end_pfn)
- continue;
+ for (zone = pgdat->node_zones;
+ zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
+ unsigned long zone_end_pfn = zone->zone_start_pfn +
+ zone->spanned_pages;
- /* If we find valid section, we have nothing to do */
- return;
+ /* No need to lock the zones, they can't change. */
+ if (!zone->spanned_pages)
+ continue;
+ if (!node_end_pfn || zone->zone_start_pfn < node_start_pfn)
+ node_start_pfn = zone->zone_start_pfn;
+ if (zone_end_pfn > node_end_pfn)
+ node_end_pfn = zone_end_pfn;
}
- /* The pgdat has no valid section */
- pgdat->node_start_pfn = 0;
- pgdat->node_spanned_pages = 0;
+ pgdat->node_start_pfn = node_start_pfn;
+ pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
}
static void __remove_zone(struct zone *zone, unsigned long start_pfn,
@@ -507,7 +465,7 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn,
pgdat_resize_lock(zone->zone_pgdat, &flags);
shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
- shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
+ update_pgdat_span(pgdat);
pgdat_resize_unlock(zone->zone_pgdat, &flags);
}
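Rather than shrinking the node span incrementally (which meant walking possibly uninitialized memmaps), the new helper derives the span from the zones, which are kept accurate elsewhere. A userspace model of that recomputation, skipping empty zones so a stale zero start pfn is never picked up (the zone values are invented for the demo):

#include <stdio.h>

struct zone { unsigned long start_pfn, spanned_pages; };

static void update_span(struct zone *zones, int nr,
			unsigned long *node_start, unsigned long *node_pages)
{
	unsigned long start = 0, end = 0;

	for (int i = 0; i < nr; i++) {
		unsigned long zend = zones[i].start_pfn +
				     zones[i].spanned_pages;

		if (!zones[i].spanned_pages)	/* empty zone, ignore */
			continue;
		if (!end || zones[i].start_pfn < start)
			start = zones[i].start_pfn;
		if (zend > end)
			end = zend;
	}
	*node_start = start;
	*node_pages = end - start;
}

int main(void)
{
	struct zone zones[] = { { 0, 0 }, { 0x100, 0x200 }, { 0x400, 0x80 } };
	unsigned long start, pages;

	update_span(zones, 3, &start, &pages);
	printf("node: start=%#lx spanned=%#lx\n", start, pages);
	return 0;
}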
diff --git a/mm/memremap.c b/mm/memremap.c
index 32c79b51af86..03ccbdfeb697 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -13,8 +13,6 @@
#include <linux/xarray.h>
static DEFINE_XARRAY(pgmap_array);
-#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
-#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
@@ -105,6 +103,7 @@ static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
void memunmap_pages(struct dev_pagemap *pgmap)
{
struct resource *res = &pgmap->res;
+ struct page *first_page;
unsigned long pfn;
int nid;
@@ -113,14 +112,16 @@ void memunmap_pages(struct dev_pagemap *pgmap)
put_page(pfn_to_page(pfn));
dev_pagemap_cleanup(pgmap);
+ /* make sure to access a memmap that was actually initialized */
+ first_page = pfn_to_page(pfn_first(pgmap));
+
/* pages are dead and unused, undo the arch mapping */
- nid = page_to_nid(pfn_to_page(PHYS_PFN(res->start)));
+ nid = page_to_nid(first_page);
mem_hotplug_begin();
if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
- pfn = PHYS_PFN(res->start);
- __remove_pages(page_zone(pfn_to_page(pfn)), pfn,
- PHYS_PFN(resource_size(res)), NULL);
+ __remove_pages(page_zone(first_page), PHYS_PFN(res->start),
+ PHYS_PFN(resource_size(res)), NULL);
} else {
arch_remove_memory(nid, res->start, resource_size(res),
pgmap_altmap(pgmap));
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 15c2050c629b..ecc3dbad606b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1175,11 +1175,17 @@ static __always_inline bool free_pages_prepare(struct page *page,
debug_check_no_obj_freed(page_address(page),
PAGE_SIZE << order);
}
- arch_free_page(page, order);
if (want_init_on_free())
kernel_init_free_pages(page, 1 << order);
kernel_poison_pages(page, 1 << order, 0);
+ /*
+ * arch_free_page() can make the page's contents inaccessible. s390
+	 * does this, so nothing that can access the page's contents should
+	 * happen after this.
+ */
+ arch_free_page(page, order);
+
if (debug_pagealloc_enabled())
kernel_map_pages(page, 1 << order, 0);
@@ -4467,12 +4473,14 @@ retry_cpuset:
if (page)
goto got_pg;
- if (order >= pageblock_order && (gfp_mask & __GFP_IO)) {
+ if (order >= pageblock_order && (gfp_mask & __GFP_IO) &&
+ !(gfp_mask & __GFP_RETRY_MAYFAIL)) {
/*
* If allocating entire pageblock(s) and compaction
* failed because all zones are below low watermarks
* or is prohibited because it recently failed at this
- * order, fail immediately.
+ * order, fail immediately unless the allocator has
+ * requested compaction and reclaim retry.
*
* Reclaim is
* - potentially very expensive because zones are far
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 5f5769c7db3b..4ade843ff588 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -67,8 +67,9 @@ static struct page_ext_operations *page_ext_ops[] = {
#endif
};
+unsigned long page_ext_size = sizeof(struct page_ext);
+
static unsigned long total_usage;
-static unsigned long extra_mem;
static bool __init invoke_need_callbacks(void)
{
@@ -78,9 +79,8 @@ static bool __init invoke_need_callbacks(void)
for (i = 0; i < entries; i++) {
if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
- page_ext_ops[i]->offset = sizeof(struct page_ext) +
- extra_mem;
- extra_mem += page_ext_ops[i]->size;
+ page_ext_ops[i]->offset = page_ext_size;
+ page_ext_size += page_ext_ops[i]->size;
need = true;
}
}
@@ -99,14 +99,9 @@ static void __init invoke_init_callbacks(void)
}
}
-static unsigned long get_entry_size(void)
-{
- return sizeof(struct page_ext) + extra_mem;
-}
-
static inline struct page_ext *get_entry(void *base, unsigned long index)
{
- return base + get_entry_size() * index;
+ return base + page_ext_size * index;
}
#if !defined(CONFIG_SPARSEMEM)
@@ -156,7 +151,7 @@ static int __init alloc_node_page_ext(int nid)
!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
nr_pages += MAX_ORDER_NR_PAGES;
- table_size = get_entry_size() * nr_pages;
+ table_size = page_ext_size * nr_pages;
base = memblock_alloc_try_nid(
table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
@@ -234,7 +229,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
if (section->page_ext)
return 0;
- table_size = get_entry_size() * PAGES_PER_SECTION;
+ table_size = page_ext_size * PAGES_PER_SECTION;
base = alloc_page_ext(table_size, nid);
/*
@@ -254,7 +249,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
* we need to apply a mask.
*/
pfn &= PAGE_SECTION_MASK;
- section->page_ext = (void *)base - get_entry_size() * pfn;
+ section->page_ext = (void *)base - page_ext_size * pfn;
total_usage += table_size;
return 0;
}
@@ -267,7 +262,7 @@ static void free_page_ext(void *addr)
struct page *page = virt_to_page(addr);
size_t table_size;
- table_size = get_entry_size() * PAGES_PER_SECTION;
+ table_size = page_ext_size * PAGES_PER_SECTION;
BUG_ON(PageReserved(page));
kmemleak_free(addr);
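With page_ext_size growing in place as each client registers, every consumer reads one variable instead of recomputing sizeof(struct page_ext) + extra_mem. The layout logic amounts to a small offset registry; a hedged sketch with arbitrary client sizes:

#include <stddef.h>
#include <stdio.h>

struct page_ext { unsigned long flags; };

/* Mirrors the registration loop: each client that declares a need
 * is handed the current end of the record as its offset, and the
 * record grows by the client's size. */
static size_t page_ext_size = sizeof(struct page_ext);

static size_t reserve(size_t client_size)
{
	size_t offset = page_ext_size;

	page_ext_size += client_size;
	return offset;
}

/* get_entry() equivalent: records are page_ext_size apart. */
static void *get_entry(void *base, unsigned long index)
{
	return (char *)base + page_ext_size * index;
}

int main(void)
{
	size_t owner_off = reserve(24);	/* e.g. a page_owner-like client */
	size_t tag_off = reserve(8);	/* e.g. a second client */
	char table[4096];

	printf("offsets: %zu %zu, entry size %zu, entry[3] at %p\n",
	       owner_off, tag_off, page_ext_size, get_entry(table, 3));
	return 0;
}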
diff --git a/mm/page_owner.c b/mm/page_owner.c
index dee931184788..18ecde9f45b2 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -24,12 +24,10 @@ struct page_owner {
short last_migrate_reason;
gfp_t gfp_mask;
depot_stack_handle_t handle;
-#ifdef CONFIG_DEBUG_PAGEALLOC
depot_stack_handle_t free_handle;
-#endif
};
-static bool page_owner_disabled = true;
+static bool page_owner_enabled = false;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);
static depot_stack_handle_t dummy_handle;
@@ -44,7 +42,7 @@ static int __init early_page_owner_param(char *buf)
return -EINVAL;
if (strcmp(buf, "on") == 0)
- page_owner_disabled = false;
+ page_owner_enabled = true;
return 0;
}
@@ -52,10 +50,7 @@ early_param("page_owner", early_page_owner_param);
static bool need_page_owner(void)
{
- if (page_owner_disabled)
- return false;
-
- return true;
+ return page_owner_enabled;
}
static __always_inline depot_stack_handle_t create_dummy_stack(void)
@@ -84,7 +79,7 @@ static noinline void register_early_stack(void)
static void init_page_owner(void)
{
- if (page_owner_disabled)
+ if (!page_owner_enabled)
return;
register_dummy_stack();
@@ -148,25 +143,19 @@ void __reset_page_owner(struct page *page, unsigned int order)
{
int i;
struct page_ext *page_ext;
-#ifdef CONFIG_DEBUG_PAGEALLOC
depot_stack_handle_t handle = 0;
struct page_owner *page_owner;
- if (debug_pagealloc_enabled())
- handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
-#endif
+ handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
+ page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ return;
for (i = 0; i < (1 << order); i++) {
- page_ext = lookup_page_ext(page + i);
- if (unlikely(!page_ext))
- continue;
- __clear_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
-#ifdef CONFIG_DEBUG_PAGEALLOC
- if (debug_pagealloc_enabled()) {
- page_owner = get_page_owner(page_ext);
- page_owner->free_handle = handle;
- }
-#endif
+ __clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
+ page_owner = get_page_owner(page_ext);
+ page_owner->free_handle = handle;
+ page_ext = page_ext_next(page_ext);
}
}
@@ -184,9 +173,9 @@ static inline void __set_page_owner_handle(struct page *page,
page_owner->gfp_mask = gfp_mask;
page_owner->last_migrate_reason = -1;
__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
- __set_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
+ __set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
- page_ext = lookup_page_ext(page + i);
+ page_ext = page_ext_next(page_ext);
}
}
@@ -224,12 +213,10 @@ void __split_page_owner(struct page *page, unsigned int order)
if (unlikely(!page_ext))
return;
- page_owner = get_page_owner(page_ext);
- page_owner->order = 0;
- for (i = 1; i < (1 << order); i++) {
- page_ext = lookup_page_ext(page + i);
+ for (i = 0; i < (1 << order); i++) {
page_owner = get_page_owner(page_ext);
page_owner->order = 0;
+ page_ext = page_ext_next(page_ext);
}
}
@@ -260,7 +247,7 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
* the new page, which will be freed.
*/
__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
- __set_bit(PAGE_EXT_OWNER_ACTIVE, &new_ext->flags);
+ __set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
}
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
@@ -284,7 +271,8 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
* not matter as the mixed block count will still be correct
*/
for (; pfn < end_pfn; ) {
- if (!pfn_valid(pfn)) {
+ page = pfn_to_online_page(pfn);
+ if (!page) {
pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
continue;
}
@@ -292,13 +280,13 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
block_end_pfn = min(block_end_pfn, end_pfn);
- page = pfn_to_page(pfn);
pageblock_mt = get_pageblock_migratetype(page);
for (; pfn < block_end_pfn; pfn++) {
if (!pfn_valid_within(pfn))
continue;
+ /* The pageblock is online, no need to recheck. */
page = pfn_to_page(pfn);
if (page_zone(page) != zone)
@@ -320,7 +308,7 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
if (unlikely(!page_ext))
continue;
- if (!test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
+ if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
continue;
page_owner = get_page_owner(page_ext);
@@ -435,7 +423,7 @@ void __dump_page_owner(struct page *page)
return;
}
- if (test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
+ if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
pr_alert("page_owner tracks the page as allocated\n");
else
pr_alert("page_owner tracks the page as freed\n");
@@ -451,7 +439,6 @@ void __dump_page_owner(struct page *page)
stack_trace_print(entries, nr_entries, 0);
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
handle = READ_ONCE(page_owner->free_handle);
if (!handle) {
pr_alert("page_owner free stack trace missing\n");
@@ -460,7 +447,6 @@ void __dump_page_owner(struct page *page)
pr_alert("page last free stack trace:\n");
stack_trace_print(entries, nr_entries, 0);
}
-#endif
if (page_owner->last_migrate_reason != -1)
pr_alert("page has been migrated, last migrate reason: %s\n",
@@ -527,7 +513,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
* Although we do have the info about past allocation of free
* pages, it's not relevant for current memory usage.
*/
- if (!test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
+ if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
continue;
page_owner = get_page_owner(page_ext);
diff --git a/mm/rmap.c b/mm/rmap.c
index d9a23bb773bf..0c7b2a9400d4 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -61,6 +61,7 @@
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
+#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
diff --git a/mm/shmem.c b/mm/shmem.c
index cd570cc79c76..220be9fa2c41 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3482,6 +3482,12 @@ static int shmem_parse_options(struct fs_context *fc, void *data)
{
char *options = data;
+ if (options) {
+ int err = security_sb_eat_lsm_opts(options, &fc->security);
+ if (err)
+ return err;
+ }
+
while (options != NULL) {
char *this_char = options;
for (;;) {
diff --git a/mm/shuffle.c b/mm/shuffle.c
index 3ce12481b1dc..b3fe97fd6654 100644
--- a/mm/shuffle.c
+++ b/mm/shuffle.c
@@ -33,7 +33,7 @@ __meminit void page_alloc_shuffle(enum mm_shuffle_ctl ctl)
}
static bool shuffle_param;
-extern int shuffle_show(char *buffer, const struct kernel_param *kp)
+static int shuffle_show(char *buffer, const struct kernel_param *kp)
{
return sprintf(buffer, "%c\n", test_bit(SHUFFLE_ENABLE, &shuffle_state)
? 'Y' : 'N');
diff --git a/mm/slab.c b/mm/slab.c
index 9df370558e5d..66e5d8032bae 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4206,9 +4206,12 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
/**
* __ksize -- Uninstrumented ksize.
+ * @objp: pointer to the object
*
* Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
* safety checks as ksize() with KASAN instrumentation enabled.
+ *
+ * Return: size of the actual memory used by @objp in bytes
*/
size_t __ksize(const void *objp)
{
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 6491c3a41805..f9fb27b4c843 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -178,10 +178,13 @@ static int init_memcg_params(struct kmem_cache *s,
static void destroy_memcg_params(struct kmem_cache *s)
{
- if (is_root_cache(s))
+ if (is_root_cache(s)) {
kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
- else
+ } else {
+ mem_cgroup_put(s->memcg_params.memcg);
+ WRITE_ONCE(s->memcg_params.memcg, NULL);
percpu_ref_exit(&s->memcg_params.refcnt);
+ }
}
static void free_memcg_params(struct rcu_head *rcu)
@@ -253,8 +256,6 @@ static void memcg_unlink_cache(struct kmem_cache *s)
} else {
list_del(&s->memcg_params.children_node);
list_del(&s->memcg_params.kmem_caches_node);
- mem_cgroup_put(s->memcg_params.memcg);
- WRITE_ONCE(s->memcg_params.memcg, NULL);
}
}
#else
@@ -1030,10 +1031,19 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name,
unsigned int useroffset, unsigned int usersize)
{
int err;
+ unsigned int align = ARCH_KMALLOC_MINALIGN;
s->name = name;
s->size = s->object_size = size;
- s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
+
+ /*
+ * For power of two sizes, guarantee natural alignment for kmalloc
+ * caches, regardless of SL*B debugging options.
+ */
+ if (is_power_of_2(size))
+ align = max(align, size);
+ s->align = calculate_alignment(flags, align, size);
+
s->useroffset = useroffset;
s->usersize = usersize;
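The guarantee added here: a kmalloc cache whose object size is a power of two is aligned to at least that size, regardless of SL*B debugging options. The calculation is small enough to model directly; calculate_alignment() below is a simplified stand-in for the real slab helper:

#include <stdio.h>

static int is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* Simplified stand-in for the slab-side calculate_alignment(). */
static unsigned int calculate_alignment(unsigned int align, unsigned int size)
{
	(void)size;	/* the real helper also folds in arch minimums */
	return align;
}

static unsigned int cache_align(unsigned int minalign, unsigned int size)
{
	unsigned int align = minalign;

	/* Power-of-two sizes get natural alignment: a 512-byte object
	 * ends up 512-byte aligned even when minalign is 8. */
	if (is_power_of_2(size))
		align = size > align ? size : align;
	return calculate_alignment(align, size);
}

int main(void)
{
	printf("512 -> %u, 96 -> %u\n",
	       cache_align(8, 512), cache_align(8, 96));
	return 0;
}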
@@ -1287,12 +1297,16 @@ void __init create_kmalloc_caches(slab_flags_t flags)
*/
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
- void *ret;
+ void *ret = NULL;
struct page *page;
flags |= __GFP_COMP;
page = alloc_pages(flags, order);
- ret = page ? page_address(page) : NULL;
+ if (likely(page)) {
+ ret = page_address(page);
+ mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+ 1 << order);
+ }
ret = kasan_kmalloc_large(ret, size, flags);
/* As ret might get tagged, call kmemleak hook after KASAN. */
kmemleak_alloc(ret, size, 1, flags);
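kmalloc_order() — and, in later hunks, the slob and slub page paths — now books large page-backed allocations under NR_SLAB_UNRECLAIMABLE, with the free paths subtracting the same 1 << order. The invariant is a paired charge/uncharge keyed by allocation order; a toy model (the counter stands in for the per-node vmstat):

#include <stdio.h>

static long nr_slab_unreclaimable;	/* stand-in for the node vmstat */

static void *alloc_order(unsigned int order)
{
	nr_slab_unreclaimable += 1L << order;	/* charge on success */
	return &nr_slab_unreclaimable;		/* dummy page address */
}

static void free_order(void *p, unsigned int order)
{
	(void)p;
	nr_slab_unreclaimable -= 1L << order;	/* matching uncharge */
}

int main(void)
{
	void *p = alloc_order(3);		/* 8 pages */

	printf("charged: %ld\n", nr_slab_unreclaimable);
	free_order(p, 3);
	printf("after free: %ld\n", nr_slab_unreclaimable);
	return 0;
}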
diff --git a/mm/slob.c b/mm/slob.c
index cf377beab962..fa53e9f73893 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -190,7 +190,7 @@ static int slob_last(slob_t *s)
static void *slob_new_pages(gfp_t gfp, int order, int node)
{
- void *page;
+ struct page *page;
#ifdef CONFIG_NUMA
if (node != NUMA_NO_NODE)
@@ -202,14 +202,21 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
if (!page)
return NULL;
+ mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+ 1 << order);
return page_address(page);
}
static void slob_free_pages(void *b, int order)
{
+ struct page *sp = virt_to_page(b);
+
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += 1 << order;
- free_pages((unsigned long)b, order);
+
+ mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
+ -(1 << order));
+ __free_pages(sp, order);
}
/*
@@ -217,6 +224,7 @@ static void slob_free_pages(void *b, int order)
* @sp: Page to look in.
* @size: Size of the allocation.
* @align: Allocation alignment.
+ * @align_offset: Offset in the allocated block that will be aligned.
* @page_removed_from_list: Return parameter.
*
* Tries to find a chunk of memory at least @size bytes big within @page.
@@ -227,7 +235,7 @@ static void slob_free_pages(void *b, int order)
* true (set to false otherwise).
*/
static void *slob_page_alloc(struct page *sp, size_t size, int align,
- bool *page_removed_from_list)
+ int align_offset, bool *page_removed_from_list)
{
slob_t *prev, *cur, *aligned = NULL;
int delta = 0, units = SLOB_UNITS(size);
@@ -236,8 +244,17 @@ static void *slob_page_alloc(struct page *sp, size_t size, int align,
for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
slobidx_t avail = slob_units(cur);
+ /*
+ * 'aligned' will hold the address of the slob block so that the
+ * address 'aligned'+'align_offset' is aligned according to the
+ * 'align' parameter. This is for kmalloc() which prepends the
+ * allocated block with its size, so that the block itself is
+ * aligned when needed.
+ */
if (align) {
- aligned = (slob_t *)ALIGN((unsigned long)cur, align);
+ aligned = (slob_t *)
+ (ALIGN((unsigned long)cur + align_offset, align)
+ - align_offset);
delta = aligned - cur;
}
if (avail >= units + delta) { /* room enough? */
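The expression ALIGN(cur + align_offset, align) - align_offset chooses the block address so that the payload, which begins align_offset bytes in (after kmalloc's size prefix), lands on the alignment boundary rather than the block itself. A worked check of that arithmetic, with ALIGN defined locally the way the kernel macro expands and the input values invented:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long cur = 0x1008;	/* hypothetical free chunk */
	unsigned long align = 64;	/* wanted payload alignment */
	unsigned long off = 8;		/* kmalloc's size-prefix bytes */

	unsigned long block = ALIGN(cur + off, align) - off;
	unsigned long payload = block + off;

	/* the payload is 64-byte aligned; the block holding the
	 * size prefix deliberately is not */
	printf("block=%#lx payload=%#lx aligned=%d\n",
	       block, payload, (payload % align) == 0);
	return 0;
}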
@@ -281,7 +298,8 @@ static void *slob_page_alloc(struct page *sp, size_t size, int align,
/*
* slob_alloc: entry point into the slob allocator.
*/
-static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
+static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
+ int align_offset)
{
struct page *sp;
struct list_head *slob_list;
@@ -312,7 +330,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
if (sp->units < SLOB_UNITS(size))
continue;
- b = slob_page_alloc(sp, size, align, &page_removed_from_list);
+ b = slob_page_alloc(sp, size, align, align_offset, &page_removed_from_list);
if (!b)
continue;
@@ -349,7 +367,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
INIT_LIST_HEAD(&sp->slab_list);
set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
set_slob_page_free(sp, slob_list);
- b = slob_page_alloc(sp, size, align, &_unused);
+ b = slob_page_alloc(sp, size, align, align_offset, &_unused);
BUG_ON(!b);
spin_unlock_irqrestore(&slob_lock, flags);
}
@@ -451,7 +469,7 @@ static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
{
unsigned int *m;
- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+ int minalign = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
void *ret;
gfp &= gfp_allowed_mask;
@@ -459,19 +477,28 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
fs_reclaim_acquire(gfp);
fs_reclaim_release(gfp);
- if (size < PAGE_SIZE - align) {
+ if (size < PAGE_SIZE - minalign) {
+ int align = minalign;
+
+ /*
+ * For power of two sizes, guarantee natural alignment for
+ * kmalloc()'d objects.
+ */
+ if (is_power_of_2(size))
+ align = max(minalign, (int) size);
+
if (!size)
return ZERO_SIZE_PTR;
- m = slob_alloc(size + align, gfp, align, node);
+ m = slob_alloc(size + minalign, gfp, align, node, minalign);
if (!m)
return NULL;
*m = size;
- ret = (void *)m + align;
+ ret = (void *)m + minalign;
trace_kmalloc_node(caller, ret,
- size, size + align, gfp, node);
+ size, size + minalign, gfp, node);
} else {
unsigned int order = get_order(size);
@@ -521,8 +548,13 @@ void kfree(const void *block)
int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
slob_free(m, *m + align);
- } else
- __free_pages(sp, compound_order(sp));
+ } else {
+ unsigned int order = compound_order(sp);
+ mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
+ -(1 << order));
+ __free_pages(sp, order);
+ }
}
EXPORT_SYMBOL(kfree);
@@ -567,7 +599,7 @@ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
fs_reclaim_release(flags);
if (c->size < PAGE_SIZE) {
- b = slob_alloc(c->size, flags, c->align, node);
+ b = slob_alloc(c->size, flags, c->align, node, 0);
trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
SLOB_UNITS(c->size) * SLOB_UNIT,
flags, node);
diff --git a/mm/slub.c b/mm/slub.c
index 42c1b3af3c98..b25c807a111f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2672,6 +2672,17 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
}
/*
+ * If the object has been wiped upon free, make sure it's fully initialized by
+ * zeroing out the freelist pointer.
+ */
+static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
+ void *obj)
+{
+ if (unlikely(slab_want_init_on_free(s)) && obj)
+ memset((void *)((char *)obj + s->offset), 0, sizeof(void *));
+}
+
+/*
* Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
* have the fastpath folded into their functions. So no function call
* overhead for requests that can be satisfied on the fastpath.
@@ -2759,12 +2770,8 @@ redo:
prefetch_freepointer(s, next_object);
stat(s, ALLOC_FASTPATH);
}
- /*
- * If the object has been wiped upon free, make sure it's fully
- * initialized by zeroing out freelist pointer.
- */
- if (unlikely(slab_want_init_on_free(s)) && object)
- memset(object + s->offset, 0, sizeof(void *));
+
+ maybe_wipe_obj_freeptr(s, object);
if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
memset(object, 0, s->object_size);
@@ -3178,10 +3185,13 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
goto error;
c = this_cpu_ptr(s->cpu_slab);
+ maybe_wipe_obj_freeptr(s, p[i]);
+
continue; /* goto for-loop */
}
c->freelist = get_freepointer(s, object);
p[i] = object;
+ maybe_wipe_obj_freeptr(s, p[i]);
}
c->tid = next_tid(c->tid);
local_irq_enable();
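Factoring the wipe into maybe_wipe_obj_freeptr() lets the bulk-allocation path share it: under init_on_free, the word at s->offset inside each object still carries freelist metadata at allocation time and must be re-zeroed before the object is handed out. A sketch of what the helper does to an object's memory (struct layout and offset invented):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct cache { size_t offset; };	/* freelist pointer offset */

/* Zero only the embedded freelist word, leaving the rest of the
 * (already zeroed-on-free) object untouched. */
static void maybe_wipe_obj_freeptr(struct cache *s, void *obj)
{
	if (obj)
		memset((char *)obj + s->offset, 0, sizeof(void *));
}

int main(void)
{
	struct cache s = { .offset = 16 };	/* invented layout */
	unsigned char obj[64] = { 0 };

	/* Simulate freelist metadata left behind in the object. */
	memcpy(obj + s.offset, "\xde\xad\xbe\xef\xde\xad\xbe\xef",
	       sizeof(void *));
	maybe_wipe_obj_freeptr(&s, obj);
	printf("freeptr word now: %#x\n", obj[s.offset]);
	return 0;
}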
@@ -3821,11 +3831,15 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{
struct page *page;
void *ptr = NULL;
+ unsigned int order = get_order(size);
flags |= __GFP_COMP;
- page = alloc_pages_node(node, flags, get_order(size));
- if (page)
+ page = alloc_pages_node(node, flags, order);
+ if (page) {
ptr = page_address(page);
+ mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+ 1 << order);
+ }
return kmalloc_large_node_hook(ptr, size, flags);
}
@@ -3951,9 +3965,13 @@ void kfree(const void *x)
page = virt_to_head_page(x);
if (unlikely(!PageSlab(page))) {
+ unsigned int order = compound_order(page);
+
BUG_ON(!PageCompound(page));
kfree_hook(object);
- __free_pages(page, compound_order(page));
+ mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+ -(1 << order));
+ __free_pages(page, order);
return;
}
slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
@@ -4838,7 +4856,17 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
}
}
- get_online_mems();
+ /*
+	 * We cannot take "mem_hotplug_lock" here with "kernfs_mutex"
+	 * already held, as that would conflict with an existing lock order:
+ *
+ * mem_hotplug_lock->slab_mutex->kernfs_mutex
+ *
+ * We don't really need mem_hotplug_lock (to hold off
+ * slab_mem_going_offline_callback) here because slab's memory hot
+ * unplug code doesn't destroy the kmem_cache->node[] data.
+ */
+
#ifdef CONFIG_SLUB_DEBUG
if (flags & SO_ALL) {
struct kmem_cache_node *n;
@@ -4879,7 +4907,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
x += sprintf(buf + x, " N%d=%lu",
node, nodes[node]);
#endif
- put_online_mems();
kfree(nodes);
return x + sprintf(buf + x, "\n");
}
diff --git a/mm/sparse.c b/mm/sparse.c
index bf32de9e666b..f6891c1992b1 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -219,7 +219,7 @@ static inline unsigned long first_present_section_nr(void)
return next_present_section_nr(-1);
}
-void subsection_mask_set(unsigned long *map, unsigned long pfn,
+static void subsection_mask_set(unsigned long *map, unsigned long pfn,
unsigned long nr_pages)
{
int idx = subsection_map_index(pfn);
diff --git a/mm/truncate.c b/mm/truncate.c
index 8563339041f6..dd9ebc1da356 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -592,6 +592,16 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
unlock_page(page);
continue;
}
+
+ /* Take a pin outside pagevec */
+ get_page(page);
+
+ /*
+ * Drop extra pins before trying to invalidate
+ * the huge page.
+ */
+ pagevec_remove_exceptionals(&pvec);
+ pagevec_release(&pvec);
}
ret = invalidate_inode_page(page);
@@ -602,6 +612,8 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
*/
if (!ret)
deactivate_file_page(page);
+ if (PageTransHuge(page))
+ put_page(page);
count += ret;
}
pagevec_remove_exceptionals(&pvec);
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index f3b50811497a..4bac22fe1aa2 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -355,6 +355,9 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
* "hierarchy" or "local").
*
* To be used as memcg event method.
+ *
+ * Return: 0 on success, -ENOMEM on memory failure or -EINVAL if @args could
+ * not be parsed.
*/
int vmpressure_register_event(struct mem_cgroup *memcg,
struct eventfd_ctx *eventfd, const char *args)
@@ -362,7 +365,7 @@ int vmpressure_register_event(struct mem_cgroup *memcg,
struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
struct vmpressure_event *ev;
enum vmpressure_modes mode = VMPRESSURE_NO_PASSTHROUGH;
- enum vmpressure_levels level = -1;
+ enum vmpressure_levels level;
char *spec, *spec_orig;
char *token;
int ret = 0;
@@ -375,20 +378,18 @@ int vmpressure_register_event(struct mem_cgroup *memcg,
/* Find required level */
token = strsep(&spec, ",");
- level = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
- if (level < 0) {
- ret = level;
+ ret = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
+ if (ret < 0)
goto out;
- }
+ level = ret;
/* Find optional mode */
token = strsep(&spec, ",");
if (token) {
- mode = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
- if (mode < 0) {
- ret = mode;
+ ret = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
+ if (ret < 0)
goto out;
- }
+ mode = ret;
}
ev = kzalloc(sizeof(*ev), GFP_KERNEL);
@@ -404,6 +405,7 @@ int vmpressure_register_event(struct mem_cgroup *memcg,
mutex_lock(&vmpr->events_lock);
list_add(&ev->node, &vmpr->events);
mutex_unlock(&vmpr->events_lock);
+ ret = 0;
out:
kfree(spec_orig);
return ret;
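The rewrite funnels match_string()'s result through ret so a failed lookup propagates the real error code, and the enum variable is assigned only on success (avoiding storing a negative errno in an enum). The shape of that parse-then-commit pattern, with a local stand-in for match_string():

#include <stdio.h>
#include <string.h>

enum level { LOW, MEDIUM, CRITICAL, NUM_LEVELS };
static const char *level_names[NUM_LEVELS] = { "low", "medium", "critical" };

/* Stand-in for the kernel's match_string(): index or -EINVAL. */
static int match_string(const char **arr, int n, const char *s)
{
	for (int i = 0; i < n; i++)
		if (!strcmp(arr[i], s))
			return i;
	return -22;	/* -EINVAL */
}

static int parse_level(const char *token, enum level *out)
{
	int ret = match_string(level_names, NUM_LEVELS, token);

	if (ret < 0)
		return ret;	/* propagate the error untouched */
	*out = ret;		/* commit only on success */
	return 0;
}

int main(void)
{
	enum level lvl;

	printf("%d %d\n", parse_level("medium", &lvl), parse_level("??", &lvl));
	return 0;
}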
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e5d52d6a24af..ee4eecc7e1c2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -351,12 +351,13 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
*/
unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
{
- unsigned long lru_size;
+ unsigned long lru_size = 0;
int zid;
- if (!mem_cgroup_disabled())
- lru_size = lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
- else
+ if (!mem_cgroup_disabled()) {
+ for (zid = 0; zid < MAX_NR_ZONES; zid++)
+ lru_size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
+ } else
lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
@@ -932,10 +933,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
* Note that if SetPageDirty is always performed via set_page_dirty,
* and thus under the i_pages lock, then this ordering is not required.
*/
- if (unlikely(PageTransHuge(page)) && PageSwapCache(page))
- refcount = 1 + HPAGE_PMD_NR;
- else
- refcount = 2;
+ refcount = 1 + compound_nr(page);
if (!page_ref_freeze(page, refcount))
goto cannot_free;
/* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
@@ -2459,17 +2457,70 @@ out:
*lru_pages = 0;
for_each_evictable_lru(lru) {
int file = is_file_lru(lru);
- unsigned long size;
+ unsigned long lruvec_size;
unsigned long scan;
+ unsigned long protection;
+
+ lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
+ protection = mem_cgroup_protection(memcg,
+ sc->memcg_low_reclaim);
+
+ if (protection) {
+ /*
+ * Scale a cgroup's reclaim pressure by proportioning
+ * its current usage to its memory.low or memory.min
+ * setting.
+ *
+ * This is important, as otherwise scanning aggression
+ * becomes extremely binary -- from nothing as we
+ * approach the memory protection threshold, to totally
+ * nominal as we exceed it. This results in requiring
+ * setting extremely liberal protection thresholds. It
+ * also means we simply get no protection at all if we
+ * set it too low, which is not ideal.
+ *
+ * If there is any protection in place, we reduce scan
+ * pressure by how much of the total memory used is
+ * within protection thresholds.
+ *
+ * There is one special case: in the first reclaim pass,
+ * we skip over all groups that are within their low
+ * protection. If that fails to reclaim enough pages to
+ * satisfy the reclaim goal, we come back and override
+ * the best-effort low protection. However, we still
+ * ideally want to honor how well-behaved groups are in
+ * that case instead of simply punishing them all
+ * equally. As such, we reclaim them based on how much
+ * memory they are using, reducing the scan pressure
+ * again by how much of the total memory used is under
+ * hard protection.
+ */
+ unsigned long cgroup_size = mem_cgroup_size(memcg);
+
+ /* Avoid TOCTOU with earlier protection check */
+ cgroup_size = max(cgroup_size, protection);
+
+ scan = lruvec_size - lruvec_size * protection /
+ cgroup_size;
+
+ /*
+ * Minimally target SWAP_CLUSTER_MAX pages to keep
+	 * reclaim moving forwards, avoiding decrementing
+ * sc->priority further than desirable.
+ */
+ scan = max(scan, SWAP_CLUSTER_MAX);
+ } else {
+ scan = lruvec_size;
+ }
+
+ scan >>= sc->priority;
- size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
- scan = size >> sc->priority;
/*
* If the cgroup's already been deleted, make sure to
* scrape out the remaining cache.
*/
if (!scan && !mem_cgroup_online(memcg))
- scan = min(size, SWAP_CLUSTER_MAX);
+ scan = min(lruvec_size, SWAP_CLUSTER_MAX);
switch (scan_balance) {
case SCAN_EQUAL:
@@ -2489,7 +2540,7 @@ out:
case SCAN_ANON:
/* Scan one type exclusively */
if ((scan_balance == SCAN_FILE) != file) {
- size = 0;
+ lruvec_size = 0;
scan = 0;
}
break;
@@ -2498,7 +2549,7 @@ out:
BUG();
}
- *lru_pages += size;
+ *lru_pages += lruvec_size;
nr[lru] = scan;
}
}
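Numerically, the new formula scales scan pressure by how far usage exceeds the protection threshold: scan = lruvec_size - lruvec_size * protection / usage. A cgroup at twice its memory.low therefore gets half its LRU scanned rather than all of it, and pressure fades smoothly toward zero as usage approaches the threshold. A quick check of that curve (page counts invented, SWAP_CLUSTER_MAX floor included):

#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

static unsigned long scan_target(unsigned long lruvec_size,
				 unsigned long protection,
				 unsigned long usage)
{
	unsigned long scan;

	if (!protection)
		return lruvec_size;
	if (usage < protection)		/* TOCTOU guard from the patch */
		usage = protection;
	scan = lruvec_size - lruvec_size * protection / usage;
	return scan > SWAP_CLUSTER_MAX ? scan : SWAP_CLUSTER_MAX;
}

int main(void)
{
	/* 10000-page LRU, memory.low = 50000 pages */
	unsigned long usages[] = { 50000, 60000, 100000, 200000 };

	for (int i = 0; i < 4; i++)
		printf("usage=%lu -> scan %lu of 10000\n", usages[i],
		       scan_target(10000, 50000, usages[i]));
	return 0;
}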
@@ -2742,6 +2793,13 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
memcg_memory_event(memcg, MEMCG_LOW);
break;
case MEMCG_PROT_NONE:
+ /*
+ * All protection thresholds breached. We may
+ * still choose to vary the scan pressure
+ * applied based on by how much the cgroup in
+ * question has exceeded its protection
+ * thresholds (see get_scan_count).
+ */
break;
}
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 05bdf90646e7..6d3d3f698ebb 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -998,9 +998,11 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
struct z3fold_header *zhdr;
struct page *page;
enum buddy bud;
+ bool page_claimed;
zhdr = handle_to_z3fold_header(handle);
page = virt_to_page(zhdr);
+ page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);
if (test_bit(PAGE_HEADLESS, &page->private)) {
/* if a headless page is under reclaim, just leave.
@@ -1008,7 +1010,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
* has not been set before, we release this page
* immediately so we don't care about its value any more.
*/
- if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
+ if (!page_claimed) {
spin_lock(&pool->lock);
list_del(&page->lru);
spin_unlock(&pool->lock);
@@ -1044,13 +1046,15 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
atomic64_dec(&pool->pages_nr);
return;
}
- if (test_bit(PAGE_CLAIMED, &page->private)) {
+ if (page_claimed) {
+ /* the page was claimed by someone else, back off */
z3fold_page_unlock(zhdr);
return;
}
if (unlikely(PageIsolated(page)) ||
test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
z3fold_page_unlock(zhdr);
+ clear_bit(PAGE_CLAIMED, &page->private);
return;
}
if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
@@ -1060,10 +1064,12 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
zhdr->cpu = -1;
kref_get(&zhdr->refcount);
do_compact_page(zhdr, true);
+ clear_bit(PAGE_CLAIMED, &page->private);
return;
}
kref_get(&zhdr->refcount);
queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
+ clear_bit(PAGE_CLAIMED, &page->private);
z3fold_page_unlock(zhdr);
}
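With the claim moved to the top of z3fold_free(), PAGE_CLAIMED acts as a small ownership handshake: exactly one of the racing free/reclaim paths observes the bit transition and proceeds, the loser backs off, and the winner clears the bit on every exit that does not free the page. A compressed single-threaded model of that test-and-set claim:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_CLAIMED	(1UL << 0)

static atomic_ulong page_private;

/* True if we won the claim, false if another path beat us to it. */
static bool claim_page(void)
{
	return !(atomic_fetch_or(&page_private, PAGE_CLAIMED) &
		 PAGE_CLAIMED);
}

static void release_claim(void)
{
	atomic_fetch_and(&page_private, ~PAGE_CLAIMED);
}

int main(void)
{
	bool first = claim_page();	/* winner proceeds */
	bool second = claim_page();	/* loser must back off */

	printf("first=%d second=%d\n", first, second);
	release_claim();	/* winner clears it on non-freeing exits */
	return 0;
}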
diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
index 8842798c29e6..506d6141e44e 100644
--- a/net/bridge/netfilter/nf_conntrack_bridge.c
+++ b/net/bridge/netfilter/nf_conntrack_bridge.c
@@ -33,6 +33,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
{
int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
unsigned int hlen, ll_rs, mtu;
+ ktime_t tstamp = skb->tstamp;
struct ip_frag_state state;
struct iphdr *iph;
int err;
@@ -80,6 +81,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
if (iter.frag)
ip_fraglist_prepare(skb, &iter);
+ skb->tstamp = tstamp;
err = output(net, sk, data, skb);
if (err || !iter.frag)
break;
@@ -104,6 +106,7 @@ slow_path:
goto blackhole;
}
+ skb2->tstamp = tstamp;
err = output(net, sk, data, skb2);
if (err)
goto blackhole;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 4cc8dc5db2b7..c210fc116103 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -640,7 +640,7 @@ int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
skb->len += copied;
skb->truesize += truesize;
if (sk && sk->sk_type == SOCK_STREAM) {
- sk->sk_wmem_queued += truesize;
+ sk_wmem_queued_add(sk, truesize);
sk_mem_charge(sk, truesize);
} else {
refcount_add(truesize, &skb->sk->sk_wmem_alloc);
diff --git a/net/core/filter.c b/net/core/filter.c
index ed6563622ce3..3fed5755494b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4252,12 +4252,14 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
case SO_RCVBUF:
val = min_t(u32, val, sysctl_rmem_max);
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
- sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
+ WRITE_ONCE(sk->sk_rcvbuf,
+ max_t(int, val * 2, SOCK_MIN_RCVBUF));
break;
case SO_SNDBUF:
val = min_t(u32, val, sysctl_wmem_max);
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
- sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
+ WRITE_ONCE(sk->sk_sndbuf,
+ max_t(int, val * 2, SOCK_MIN_SNDBUF));
break;
case SO_MAX_PACING_RATE: /* 32bit version */
if (val != ~0U)
@@ -4274,7 +4276,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
case SO_RCVLOWAT:
if (val < 0)
val = INT_MAX;
- sk->sk_rcvlowat = val ? : 1;
+ WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
break;
case SO_MARK:
if (sk->sk_mark != val) {
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index a0e0d298c991..6d3e4821b02d 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -245,7 +245,8 @@ static int __peernet2id(struct net *net, struct net *peer)
return __peernet2id_alloc(net, peer, &no);
}
-static void rtnl_net_notifyid(struct net *net, int cmd, int id);
+static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
+ struct nlmsghdr *nlh);
/* This function returns the id of a peer netns. If no id is assigned, one will
* be allocated and returned.
*/
@@ -268,7 +269,7 @@ int peernet2id_alloc(struct net *net, struct net *peer)
id = __peernet2id_alloc(net, peer, &alloc);
spin_unlock_bh(&net->nsid_lock);
if (alloc && id >= 0)
- rtnl_net_notifyid(net, RTM_NEWNSID, id);
+ rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL);
if (alive)
put_net(peer);
return id;
@@ -532,7 +533,7 @@ static void unhash_nsid(struct net *net, struct net *last)
idr_remove(&tmp->netns_ids, id);
spin_unlock_bh(&tmp->nsid_lock);
if (id >= 0)
- rtnl_net_notifyid(tmp, RTM_DELNSID, id);
+ rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL);
if (tmp == last)
break;
}
@@ -764,7 +765,8 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
err = alloc_netid(net, peer, nsid);
spin_unlock_bh(&net->nsid_lock);
if (err >= 0) {
- rtnl_net_notifyid(net, RTM_NEWNSID, err);
+ rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
+ nlh);
err = 0;
} else if (err == -ENOSPC && nsid >= 0) {
err = -EEXIST;
@@ -1051,9 +1053,12 @@ end:
return err < 0 ? err : skb->len;
}
-static void rtnl_net_notifyid(struct net *net, int cmd, int id)
+static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
+ struct nlmsghdr *nlh)
{
struct net_fill_args fillargs = {
+ .portid = portid,
+ .seq = nlh ? nlh->nlmsg_seq : 0,
.cmd = cmd,
.nsid = id,
};
@@ -1068,7 +1073,7 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id)
if (err < 0)
goto err_out;
- rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
+ rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, 0);
return;
err_out:
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index c9bb00008528..f35c2e998406 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -96,7 +96,7 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
fastopenq = &inet_csk(lsk)->icsk_accept_queue.fastopenq;
- tcp_sk(sk)->fastopen_rsk = NULL;
+ RCU_INIT_POINTER(tcp_sk(sk)->fastopen_rsk, NULL);
spin_lock_bh(&fastopenq->lock);
fastopenq->qlen--;
tcp_rsk(req)->tfo_listener = false;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 529133611ea2..867e61df00db 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4415,7 +4415,7 @@ static void skb_set_err_queue(struct sk_buff *skb)
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
- (unsigned int)sk->sk_rcvbuf)
+ (unsigned int)READ_ONCE(sk->sk_rcvbuf))
return -ENOMEM;
skb_orphan(skb);
@@ -5477,12 +5477,14 @@ static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
* @skb: buffer
* @mpls_lse: MPLS label stack entry to push
* @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
+ * @mac_len: length of the MAC header
*
* Expects skb->data at mac header.
*
* Returns 0 on success, -errno otherwise.
*/
-int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto)
+int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
+ int mac_len)
{
struct mpls_shim_hdr *lse;
int err;
@@ -5499,15 +5501,15 @@ int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto)
return err;
if (!skb->inner_protocol) {
- skb_set_inner_network_header(skb, skb->mac_len);
+ skb_set_inner_network_header(skb, mac_len);
skb_set_inner_protocol(skb, skb->protocol);
}
skb_push(skb, MPLS_HLEN);
memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
- skb->mac_len);
+ mac_len);
skb_reset_mac_header(skb);
- skb_set_network_header(skb, skb->mac_len);
+ skb_set_network_header(skb, mac_len);
lse = mpls_hdr(skb);
lse->label_stack_entry = mpls_lse;
@@ -5526,29 +5528,30 @@ EXPORT_SYMBOL_GPL(skb_mpls_push);
*
* @skb: buffer
* @next_proto: ethertype of header after popped MPLS header
+ * @mac_len: length of the MAC header
*
* Expects skb->data at mac header.
*
* Returns 0 on success, -errno otherwise.
*/
-int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto)
+int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len)
{
int err;
if (unlikely(!eth_p_mpls(skb->protocol)))
- return -EINVAL;
+ return 0;
- err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
+ err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
if (unlikely(err))
return err;
skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
- skb->mac_len);
+ mac_len);
__skb_pull(skb, MPLS_HLEN);
skb_reset_mac_header(skb);
- skb_set_network_header(skb, skb->mac_len);
+ skb_set_network_header(skb, mac_len);
if (skb->dev && skb->dev->type == ARPHRD_ETHER) {
struct ethhdr *hdr;
diff --git a/net/core/sock.c b/net/core/sock.c
index fac2b4d80de5..a515392ba84b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -522,7 +522,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
rc = sk_backlog_rcv(sk, skb);
mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
- } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
+ } else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
bh_unlock_sock(sk);
atomic_inc(&sk->sk_drops);
goto discard_and_relse;
@@ -785,7 +785,8 @@ set_sndbuf:
*/
val = min_t(int, val, INT_MAX / 2);
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
- sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
+ WRITE_ONCE(sk->sk_sndbuf,
+ max_t(int, val * 2, SOCK_MIN_SNDBUF));
/* Wake up sending tasks if we upped the value. */
sk->sk_write_space(sk);
break;
@@ -831,7 +832,8 @@ set_rcvbuf:
* returning the value we actually used in getsockopt
* is the most desirable behavior.
*/
- sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
+ WRITE_ONCE(sk->sk_rcvbuf,
+ max_t(int, val * 2, SOCK_MIN_RCVBUF));
break;
case SO_RCVBUFFORCE:
@@ -974,7 +976,7 @@ set_rcvbuf:
if (sock->ops->set_rcvlowat)
ret = sock->ops->set_rcvlowat(sk, val);
else
- sk->sk_rcvlowat = val ? : 1;
+ WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
break;
case SO_RCVTIMEO_OLD:
@@ -2088,8 +2090,10 @@ EXPORT_SYMBOL(sock_i_ino);
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
gfp_t priority)
{
- if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+ if (force ||
+ refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) {
struct sk_buff *skb = alloc_skb(size, priority);
+
if (skb) {
skb_set_owner_w(skb, sk);
return skb;
@@ -2190,7 +2194,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
break;
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
- if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
+ if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
break;
if (sk->sk_shutdown & SEND_SHUTDOWN)
break;
@@ -2225,7 +2229,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
if (sk->sk_shutdown & SEND_SHUTDOWN)
goto failure;
- if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
+ if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
break;
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -2334,8 +2338,8 @@ static void sk_leave_memory_pressure(struct sock *sk)
} else {
unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
- if (memory_pressure && *memory_pressure)
- *memory_pressure = 0;
+ if (memory_pressure && READ_ONCE(*memory_pressure))
+ WRITE_ONCE(*memory_pressure, 0);
}
}
@@ -2806,7 +2810,7 @@ static void sock_def_write_space(struct sock *sk)
/* Do not wake up a writer until he can make "significant"
* progress. --DaveM
*/
- if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
+ if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) {
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
@@ -3204,13 +3208,13 @@ void sk_get_meminfo(const struct sock *sk, u32 *mem)
memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);
mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
- mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
+ mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
- mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
+ mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
- mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
+ mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
- mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
+ mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
}
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 73002022c9d8..716d265ba8ca 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -46,7 +46,7 @@ static struct dsa_switch_tree *dsa_tree_alloc(int index)
dst->index = index;
INIT_LIST_HEAD(&dst->list);
- list_add_tail(&dsa_tree_list, &dst->list);
+ list_add_tail(&dst->list, &dsa_tree_list);
kref_init(&dst->refcount);
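The dsa2 change is a pure argument-order fix: list_add_tail(new, head) inserts @new before @head, i.e. at the tail of the list that @head anchors, so passing the global list head as the first argument would splice the head into the tree instead. A stand-alone demonstration with a minimal circular list using the same calling convention:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Same contract as the kernel helper: insert @new before @head. */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

struct tree { int index; struct list_head list; };

int main(void)
{
	static struct list_head tree_list = LIST_HEAD_INIT(tree_list);
	struct tree a = { .index = 0 }, b = { .index = 1 };

	/* Correct order: the new node first, the list head second. */
	list_add_tail(&a.list, &tree_list);
	list_add_tail(&b.list, &tree_list);

	for (struct list_head *p = tree_list.next; p != &tree_list;
	     p = p->next) {
		struct tree *t = (struct tree *)((char *)p -
				 offsetof(struct tree, list));
		printf("tree %d\n", t->index);
	}
	return 0;
}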
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index a9183543ca30..eb30fc1770de 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -906,7 +906,7 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
percpu_counter_inc(sk->sk_prot->orphan_count);
if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
- BUG_ON(tcp_sk(child)->fastopen_rsk != req);
+ BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
BUG_ON(sk != req->rsk_listener);
/* Paranoid, to prevent race condition if
@@ -915,7 +915,7 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
* Also to satisfy an assertion in
* tcp_v4_destroy_sock().
*/
- tcp_sk(child)->fastopen_rsk = NULL;
+ RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
}
inet_csk_destroy_sock(child);
}
@@ -934,7 +934,7 @@ struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
req->sk = child;
req->dl_next = NULL;
if (queue->rskq_accept_head == NULL)
- queue->rskq_accept_head = req;
+ WRITE_ONCE(queue->rskq_accept_head, req);
else
queue->rskq_accept_tail->dl_next = req;
queue->rskq_accept_tail = req;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index bbb005eb5218..7dc79b973e6e 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -193,7 +193,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
struct inet_diag_meminfo minfo = {
.idiag_rmem = sk_rmem_alloc_get(sk),
- .idiag_wmem = sk->sk_wmem_queued,
+ .idiag_wmem = READ_ONCE(sk->sk_wmem_queued),
.idiag_fmem = sk->sk_forward_alloc,
.idiag_tmem = sk_wmem_alloc_get(sk),
};
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 28fca408812c..814b9b8882a0 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -771,6 +771,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
struct rtable *rt = skb_rtable(skb);
unsigned int mtu, hlen, ll_rs;
struct ip_fraglist_iter iter;
+ ktime_t tstamp = skb->tstamp;
struct ip_frag_state state;
int err = 0;
@@ -846,6 +847,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
ip_fraglist_prepare(skb, &iter);
}
+ skb->tstamp = tstamp;
err = output(net, sk, skb);
if (!err)
@@ -900,6 +902,7 @@ slow_path:
/*
* Put this fragment into the sending queue.
*/
+ skb2->tstamp = tstamp;
err = output(net, sk, skb2);
if (err)
goto fail;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 14654876127e..621f83434b24 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1482,7 +1482,7 @@ static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
prev = cmpxchg(p, orig, rt);
if (prev == orig) {
if (orig) {
- dst_dev_put(&orig->dst);
+ rt_add_uncached_list(orig);
dst_release(&orig->dst);
}
} else {
@@ -2470,14 +2470,17 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
int orig_oif = fl4->flowi4_oif;
unsigned int flags = 0;
struct rtable *rth;
- int err = -ENETUNREACH;
+ int err;
if (fl4->saddr) {
- rth = ERR_PTR(-EINVAL);
if (ipv4_is_multicast(fl4->saddr) ||
ipv4_is_lbcast(fl4->saddr) ||
- ipv4_is_zeronet(fl4->saddr))
+ ipv4_is_zeronet(fl4->saddr)) {
+ rth = ERR_PTR(-EINVAL);
goto out;
+ }
+
+ rth = ERR_PTR(-ENETUNREACH);
/* I removed check for oif == dev_out->oif here.
It was wrong for two reasons:
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f98a1882e537..42187a3b82f4 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -326,7 +326,7 @@ void tcp_enter_memory_pressure(struct sock *sk)
{
unsigned long val;
- if (tcp_memory_pressure)
+ if (READ_ONCE(tcp_memory_pressure))
return;
val = jiffies;
@@ -341,7 +341,7 @@ void tcp_leave_memory_pressure(struct sock *sk)
{
unsigned long val;
- if (!tcp_memory_pressure)
+ if (!READ_ONCE(tcp_memory_pressure))
return;
val = xchg(&tcp_memory_pressure, 0);
if (val)
@@ -450,8 +450,8 @@ void tcp_init_sock(struct sock *sk)
icsk->icsk_sync_mss = tcp_sync_mss;
- sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
- sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
+ WRITE_ONCE(sk->sk_sndbuf, sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
+ WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
sk_sockets_allocated_inc(sk);
sk->sk_route_forced_caps = NETIF_F_GSO;
@@ -477,7 +477,7 @@ static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
int target, struct sock *sk)
{
- return (tp->rcv_nxt - tp->copied_seq >= target) ||
+ return (READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq) >= target) ||
(sk->sk_prot->stream_memory_read ?
sk->sk_prot->stream_memory_read(sk) : false);
}
@@ -543,10 +543,10 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
/* Connected or passive Fast Open socket? */
if (state != TCP_SYN_SENT &&
- (state != TCP_SYN_RECV || tp->fastopen_rsk)) {
+ (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
int target = sock_rcvlowat(sk, 0, INT_MAX);
- if (tp->urg_seq == tp->copied_seq &&
+ if (READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
!sock_flag(sk, SOCK_URGINLINE) &&
tp->urg_data)
target++;
@@ -607,7 +607,8 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
unlock_sock_fast(sk, slow);
break;
case SIOCATMARK:
- answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
+ answ = tp->urg_data &&
+ READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
break;
case SIOCOUTQ:
if (sk->sk_state == TCP_LISTEN)
@@ -616,7 +617,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
answ = 0;
else
- answ = tp->write_seq - tp->snd_una;
+ answ = READ_ONCE(tp->write_seq) - tp->snd_una;
break;
case SIOCOUTQNSD:
if (sk->sk_state == TCP_LISTEN)
@@ -625,7 +626,8 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
answ = 0;
else
- answ = tp->write_seq - tp->snd_nxt;
+ answ = READ_ONCE(tp->write_seq) -
+ READ_ONCE(tp->snd_nxt);
break;
default:
return -ENOIOCTLCMD;
@@ -657,7 +659,7 @@ static void skb_entail(struct sock *sk, struct sk_buff *skb)
tcb->sacked = 0;
__skb_header_release(skb);
tcp_add_write_queue_tail(sk, skb);
- sk->sk_wmem_queued += skb->truesize;
+ sk_wmem_queued_add(sk, skb->truesize);
sk_mem_charge(sk, skb->truesize);
if (tp->nonagle & TCP_NAGLE_PUSH)
tp->nonagle &= ~TCP_NAGLE_PUSH;
@@ -1032,10 +1034,10 @@ new_segment:
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
- sk->sk_wmem_queued += copy;
+ sk_wmem_queued_add(sk, copy);
sk_mem_charge(sk, copy);
skb->ip_summed = CHECKSUM_PARTIAL;
- tp->write_seq += copy;
+ WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
TCP_SKB_CB(skb)->end_seq += copy;
tcp_skb_pcount_set(skb, 0);
@@ -1362,7 +1364,7 @@ new_segment:
if (!copied)
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
- tp->write_seq += copy;
+ WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
TCP_SKB_CB(skb)->end_seq += copy;
tcp_skb_pcount_set(skb, 0);
@@ -1668,9 +1670,9 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
sk_eat_skb(sk, skb);
if (!desc->count)
break;
- tp->copied_seq = seq;
+ WRITE_ONCE(tp->copied_seq, seq);
}
- tp->copied_seq = seq;
+ WRITE_ONCE(tp->copied_seq, seq);
tcp_rcv_space_adjust(sk);
@@ -1699,7 +1701,7 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
else
cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1;
val = min(val, cap);
- sk->sk_rcvlowat = val ? : 1;
+ WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
/* Check if we need to signal EPOLLIN right now */
tcp_data_ready(sk);
@@ -1709,7 +1711,7 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
val <<= 1;
if (val > sk->sk_rcvbuf) {
- sk->sk_rcvbuf = val;
+ WRITE_ONCE(sk->sk_rcvbuf, val);
tcp_sk(sk)->window_clamp = tcp_win_from_space(sk, val);
}
return 0;
@@ -1819,7 +1821,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
out:
up_read(&current->mm->mmap_sem);
if (length) {
- tp->copied_seq = seq;
+ WRITE_ONCE(tp->copied_seq, seq);
tcp_rcv_space_adjust(sk);
/* Clean up data we have read: This will do ACK frames. */
@@ -2117,7 +2119,7 @@ found_ok_skb:
if (urg_offset < used) {
if (!urg_offset) {
if (!sock_flag(sk, SOCK_URGINLINE)) {
- ++*seq;
+ WRITE_ONCE(*seq, *seq + 1);
urg_hole++;
offset++;
used--;
@@ -2139,7 +2141,7 @@ found_ok_skb:
}
}
- *seq += used;
+ WRITE_ONCE(*seq, *seq + used);
copied += used;
len -= used;
@@ -2166,7 +2168,7 @@ skip_copy:
found_fin_ok:
/* Process the FIN. */
- ++*seq;
+ WRITE_ONCE(*seq, *seq + 1);
if (!(flags & MSG_PEEK))
sk_eat_skb(sk, skb);
break;
@@ -2487,7 +2489,10 @@ adjudge_to_death:
}
if (sk->sk_state == TCP_CLOSE) {
- struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
+ struct request_sock *req;
+
+ req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
+ lockdep_sock_is_held(sk));
/* We could get here with a non-NULL req if the socket is
* aborted (e.g., closed with unread data) before 3WHS
* finishes.
@@ -2559,6 +2564,7 @@ int tcp_disconnect(struct sock *sk, int flags)
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int old_state = sk->sk_state;
+ u32 seq;
if (old_state != TCP_CLOSE)
tcp_set_state(sk, TCP_CLOSE);
@@ -2585,7 +2591,7 @@ int tcp_disconnect(struct sock *sk, int flags)
__kfree_skb(sk->sk_rx_skb_cache);
sk->sk_rx_skb_cache = NULL;
}
- tp->copied_seq = tp->rcv_nxt;
+ WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
tp->urg_data = 0;
tcp_write_queue_purge(sk);
tcp_fastopen_active_disable_ofo_check(sk);
@@ -2601,9 +2607,12 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->srtt_us = 0;
tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
tp->rcv_rtt_last_tsecr = 0;
- tp->write_seq += tp->max_window + 2;
- if (tp->write_seq == 0)
- tp->write_seq = 1;
+
+ seq = tp->write_seq + tp->max_window + 2;
+ if (!seq)
+ seq = 1;
+ WRITE_ONCE(tp->write_seq, seq);
+
icsk->icsk_backoff = 0;
tp->snd_cwnd = 2;
icsk->icsk_probes_out = 0;
@@ -2930,9 +2939,9 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
if (sk->sk_state != TCP_CLOSE)
err = -EPERM;
else if (tp->repair_queue == TCP_SEND_QUEUE)
- tp->write_seq = val;
+ WRITE_ONCE(tp->write_seq, val);
else if (tp->repair_queue == TCP_RECV_QUEUE)
- tp->rcv_nxt = val;
+ WRITE_ONCE(tp->rcv_nxt, val);
else
err = -EINVAL;
break;
@@ -3831,7 +3840,13 @@ EXPORT_SYMBOL(tcp_md5_hash_key);
void tcp_done(struct sock *sk)
{
- struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
+ struct request_sock *req;
+
+ /* We might be called with a new socket, after
+ * inet_csk_prepare_forced_close() has been called
+ * so we cannot use lockdep_sock_is_held(sk)
+ */
+ req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1);
if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
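The tcp.c hunks above annotate sequence and buffer fields that lockless readers (tcp_poll(), tcp_ioctl(), the diag and /proc dumpers) sample while the socket-lock-holding writer updates them. A minimal sketch of the pattern, assuming the usual tcp_sock fields; illustrative, not a verbatim excerpt:

#include <net/tcp.h>

/* Writer side: runs under the socket lock, but readers do not take it,
 * so every store must be a single, untorn access.
 */
static void advance_copied_seq(struct tcp_sock *tp, u32 seq)
{
        WRITE_ONCE(tp->copied_seq, seq);
}

/* Reader side (poll/diag paths): pairs with the writer's WRITE_ONCE()
 * and keeps the compiler from tearing or re-reading the fields.
 */
static u32 unread_bytes(const struct tcp_sock *tp)
{
        return READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
}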
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 81a8221d650a..549506162dde 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -26,8 +26,9 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
} else if (sk->sk_type == SOCK_STREAM) {
const struct tcp_sock *tp = tcp_sk(sk);
- r->idiag_rqueue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
- r->idiag_wqueue = tp->write_seq - tp->snd_una;
+ r->idiag_rqueue = max_t(int, READ_ONCE(tp->rcv_nxt) -
+ READ_ONCE(tp->copied_seq), 0);
+ r->idiag_wqueue = READ_ONCE(tp->write_seq) - tp->snd_una;
}
if (info)
tcp_get_info(sk, info);
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 3fd451271a70..a915ade0c818 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -253,7 +253,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
*/
tp = tcp_sk(child);
- tp->fastopen_rsk = req;
+ rcu_assign_pointer(tp->fastopen_rsk, req);
tcp_rsk(req)->tfo_listener = true;
/* RFC1323: The window in SYN & SYN/ACK segments is never
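Across tcp_fastopen.c, tcp_input.c, tcp_ipv4.c, tcp_minisocks.c and tcp_timer.c, tp->fastopen_rsk is converted to an RCU-managed pointer. The access pattern splits three ways by context; a hedged sketch, not the exact call sites:

#include <net/tcp.h>

static struct request_sock *fastopen_rsk_pattern(struct sock *sk,
                                                 struct tcp_sock *tp,
                                                 struct request_sock *req)
{
        /* Publish: the writer holds the socket lock. */
        rcu_assign_pointer(tp->fastopen_rsk, req);

        /* NULL-test without dereferencing; legal from any context. */
        if (!rcu_access_pointer(tp->fastopen_rsk))
                return NULL;

        /* Full dereference where the socket lock is provably held. */
        return rcu_dereference_protected(tp->fastopen_rsk,
                                         lockdep_sock_is_held(sk));
}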
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3578357abe30..a2e52ad7cdab 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -359,7 +359,8 @@ static void tcp_sndbuf_expand(struct sock *sk)
sndmem *= nr_segs * per_mss;
if (sk->sk_sndbuf < sndmem)
- sk->sk_sndbuf = min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]);
+ WRITE_ONCE(sk->sk_sndbuf,
+ min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]));
}
/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -483,8 +484,9 @@ static void tcp_clamp_window(struct sock *sk)
!(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
!tcp_under_memory_pressure(sk) &&
sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
- sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
- net->ipv4.sysctl_tcp_rmem[2]);
+ WRITE_ONCE(sk->sk_rcvbuf,
+ min(atomic_read(&sk->sk_rmem_alloc),
+ net->ipv4.sysctl_tcp_rmem[2]));
}
if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
@@ -648,7 +650,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
rcvbuf = min_t(u64, rcvwin * rcvmem,
sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
if (rcvbuf > sk->sk_rcvbuf) {
- sk->sk_rcvbuf = rcvbuf;
+ WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
/* Make the window clamp follow along. */
tp->window_clamp = tcp_win_from_space(sk, rcvbuf);
@@ -2666,7 +2668,7 @@ static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
struct tcp_sock *tp = tcp_sk(sk);
bool recovered = !before(tp->snd_una, tp->high_seq);
- if ((flag & FLAG_SND_UNA_ADVANCED || tp->fastopen_rsk) &&
+ if ((flag & FLAG_SND_UNA_ADVANCED || rcu_access_pointer(tp->fastopen_rsk)) &&
tcp_try_undo_loss(sk, false))
return;
@@ -2990,7 +2992,7 @@ void tcp_rearm_rto(struct sock *sk)
/* If the retrans timer is currently being used by Fast Open
* for SYN-ACK retrans purpose, stay put.
*/
- if (tp->fastopen_rsk)
+ if (rcu_access_pointer(tp->fastopen_rsk))
return;
if (!tp->packets_out) {
@@ -3362,7 +3364,7 @@ static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
sock_owned_by_me((struct sock *)tp);
tp->bytes_received += delta;
- tp->rcv_nxt = seq;
+ WRITE_ONCE(tp->rcv_nxt, seq);
}
/* Update our send window.
@@ -5356,7 +5358,7 @@ static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
}
tp->urg_data = TCP_URG_NOTYET;
- tp->urg_seq = ptr;
+ WRITE_ONCE(tp->urg_seq, ptr);
/* Disable header prediction. */
tp->pred_flags = 0;
@@ -5932,7 +5934,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
/* Ok.. it's good. Set up sequence numbers and
* move to established.
*/
- tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+ WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
/* RFC1323: The window in SYN & SYN/ACK segments is
@@ -5961,7 +5963,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
/* Remember, tcp_poll() does not lock socket!
* Change state from SYN-SENT only after copied_seq
* is initialized. */
- tp->copied_seq = tp->rcv_nxt;
+ WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
smc_check_reset_syn(tp);
@@ -6035,8 +6037,8 @@ discard:
tp->tcp_header_len = sizeof(struct tcphdr);
}
- tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
- tp->copied_seq = tp->rcv_nxt;
+ WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
+ WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
/* RFC1323: The window in SYN & SYN/ACK segments is
@@ -6087,6 +6089,8 @@ reset_and_undo:
static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
{
+ struct request_sock *req;
+
tcp_try_undo_loss(sk, false);
/* Reset rtx states to prevent spurious retransmits_timed_out() */
@@ -6096,7 +6100,9 @@ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
/* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
* we no longer need req so release it.
*/
- reqsk_fastopen_remove(sk, tcp_sk(sk)->fastopen_rsk, false);
+ req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
+ lockdep_sock_is_held(sk));
+ reqsk_fastopen_remove(sk, req, false);
/* Re-arm the timer because data may have been sent out.
* This is similar to the regular data transmission case
@@ -6171,7 +6177,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
tcp_mstamp_refresh(tp);
tp->rx_opt.saw_tstamp = 0;
- req = tp->fastopen_rsk;
+ req = rcu_dereference_protected(tp->fastopen_rsk,
+ lockdep_sock_is_held(sk));
if (req) {
bool req_stolen;
@@ -6211,7 +6218,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
tcp_try_undo_spurious_syn(sk);
tp->retrans_stamp = 0;
tcp_init_transfer(sk, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);
- tp->copied_seq = tp->rcv_nxt;
+ WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
}
smp_mb();
tcp_set_state(sk, TCP_ESTABLISHED);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index bf124b1742df..6be568334848 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -164,9 +164,11 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
* without appearing to create any others.
*/
if (likely(!tp->repair)) {
- tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
- if (tp->write_seq == 0)
- tp->write_seq = 1;
+ u32 seq = tcptw->tw_snd_nxt + 65535 + 2;
+
+ if (!seq)
+ seq = 1;
+ WRITE_ONCE(tp->write_seq, seq);
tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
}
@@ -253,7 +255,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
tp->rx_opt.ts_recent = 0;
tp->rx_opt.ts_recent_stamp = 0;
if (likely(!tp->repair))
- tp->write_seq = 0;
+ WRITE_ONCE(tp->write_seq, 0);
}
inet->inet_dport = usin->sin_port;
@@ -291,10 +293,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (likely(!tp->repair)) {
if (!tp->write_seq)
- tp->write_seq = secure_tcp_seq(inet->inet_saddr,
- inet->inet_daddr,
- inet->inet_sport,
- usin->sin_port);
+ WRITE_ONCE(tp->write_seq,
+ secure_tcp_seq(inet->inet_saddr,
+ inet->inet_daddr,
+ inet->inet_sport,
+ usin->sin_port));
tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
inet->inet_saddr,
inet->inet_daddr);
@@ -478,7 +481,7 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
icsk = inet_csk(sk);
tp = tcp_sk(sk);
/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
- fastopen = tp->fastopen_rsk;
+ fastopen = rcu_dereference(tp->fastopen_rsk);
snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
if (sk->sk_state != TCP_LISTEN &&
!between(seq, snd_una, tp->snd_nxt)) {
@@ -1644,7 +1647,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
- u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
+ u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
struct skb_shared_info *shinfo;
const struct tcphdr *th;
struct tcphdr *thtail;
@@ -2121,7 +2124,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
if (inet_csk(sk)->icsk_bind_hash)
inet_put_port(sk);
- BUG_ON(tp->fastopen_rsk);
+ BUG_ON(rcu_access_pointer(tp->fastopen_rsk));
/* If socket is aborted during connect operation */
tcp_free_fastopen_req(tp);
@@ -2455,12 +2458,13 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
/* Because we don't lock the socket,
* we might find a transient negative value.
*/
- rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
+ rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
+ READ_ONCE(tp->copied_seq), 0);
seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
i, src, srcp, dest, destp, state,
- tp->write_seq - tp->snd_una,
+ READ_ONCE(tp->write_seq) - tp->snd_una,
rx_queue,
timer_active,
jiffies_delta_to_clock_t(timer_expires - jiffies),
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index bb140a5db8c0..c802bc80c400 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -462,6 +462,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
struct tcp_request_sock *treq = tcp_rsk(req);
struct inet_connection_sock *newicsk;
struct tcp_sock *oldtp, *newtp;
+ u32 seq;
if (!newsk)
return NULL;
@@ -475,12 +476,16 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
/* Now setup tcp_sock */
newtp->pred_flags = 0;
- newtp->rcv_wup = newtp->copied_seq =
- newtp->rcv_nxt = treq->rcv_isn + 1;
+ seq = treq->rcv_isn + 1;
+ newtp->rcv_wup = seq;
+ WRITE_ONCE(newtp->copied_seq, seq);
+ WRITE_ONCE(newtp->rcv_nxt, seq);
newtp->segs_in = 1;
- newtp->snd_sml = newtp->snd_una =
- newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
+ seq = treq->snt_isn + 1;
+ newtp->snd_sml = newtp->snd_una = seq;
+ WRITE_ONCE(newtp->snd_nxt, seq);
+ newtp->snd_up = seq;
INIT_LIST_HEAD(&newtp->tsq_node);
INIT_LIST_HEAD(&newtp->tsorted_sent_queue);
@@ -495,7 +500,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->total_retrans = req->num_retrans;
tcp_init_xmit_timers(newsk);
- newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
+ WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);
if (sock_flag(newsk, SOCK_KEEPOPEN))
inet_csk_reset_keepalive_timer(newsk,
@@ -541,7 +546,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->rx_opt.mss_clamp = req->mss;
tcp_ecn_openreq_child(newtp, req);
newtp->fastopen_req = NULL;
- newtp->fastopen_rsk = NULL;
+ RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);
__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index fec6d67bfd14..0488607c5cd3 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -67,7 +67,7 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
struct tcp_sock *tp = tcp_sk(sk);
unsigned int prior_packets = tp->packets_out;
- tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
+ WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);
__skb_unlink(skb, &sk->sk_write_queue);
tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
@@ -1196,10 +1196,10 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
struct tcp_sock *tp = tcp_sk(sk);
/* Advance write_seq and place onto the write_queue. */
- tp->write_seq = TCP_SKB_CB(skb)->end_seq;
+ WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
__skb_header_release(skb);
tcp_add_write_queue_tail(sk, skb);
- sk->sk_wmem_queued += skb->truesize;
+ sk_wmem_queued_add(sk, skb->truesize);
sk_mem_charge(sk, skb->truesize);
}
@@ -1333,7 +1333,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
return -ENOMEM; /* We'll just try again later. */
skb_copy_decrypted(buff, skb);
- sk->sk_wmem_queued += buff->truesize;
+ sk_wmem_queued_add(sk, buff->truesize);
sk_mem_charge(sk, buff->truesize);
nlen = skb->len - len - nsize;
buff->truesize += nlen;
@@ -1443,7 +1443,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
if (delta_truesize) {
skb->truesize -= delta_truesize;
- sk->sk_wmem_queued -= delta_truesize;
+ sk_wmem_queued_add(sk, -delta_truesize);
sk_mem_uncharge(sk, delta_truesize);
sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
}
@@ -1888,7 +1888,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
return -ENOMEM;
skb_copy_decrypted(buff, skb);
- sk->sk_wmem_queued += buff->truesize;
+ sk_wmem_queued_add(sk, buff->truesize);
sk_mem_charge(sk, buff->truesize);
buff->truesize += nlen;
skb->truesize -= nlen;
@@ -2152,7 +2152,7 @@ static int tcp_mtu_probe(struct sock *sk)
nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
if (!nskb)
return -1;
- sk->sk_wmem_queued += nskb->truesize;
+ sk_wmem_queued_add(sk, nskb->truesize);
sk_mem_charge(sk, nskb->truesize);
skb = tcp_send_head(sk);
@@ -2482,7 +2482,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
/* Don't do any loss probe on a Fast Open connection before 3WHS
* finishes.
*/
- if (tp->fastopen_rsk)
+ if (rcu_access_pointer(tp->fastopen_rsk))
return false;
early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
@@ -3142,7 +3142,7 @@ void tcp_send_fin(struct sock *sk)
* if FIN had been sent. This is because retransmit path
* does not change tp->snd_nxt.
*/
- tp->snd_nxt++;
+ WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1);
return;
}
} else {
@@ -3222,7 +3222,7 @@ int tcp_send_synack(struct sock *sk)
tcp_rtx_queue_unlink_and_free(skb, sk);
__skb_header_release(nskb);
tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
- sk->sk_wmem_queued += nskb->truesize;
+ sk_wmem_queued_add(sk, nskb->truesize);
sk_mem_charge(sk, nskb->truesize);
skb = nskb;
}
@@ -3426,14 +3426,14 @@ static void tcp_connect_init(struct sock *sk)
tp->snd_una = tp->write_seq;
tp->snd_sml = tp->write_seq;
tp->snd_up = tp->write_seq;
- tp->snd_nxt = tp->write_seq;
+ WRITE_ONCE(tp->snd_nxt, tp->write_seq);
if (likely(!tp->repair))
tp->rcv_nxt = 0;
else
tp->rcv_tstamp = tcp_jiffies32;
tp->rcv_wup = tp->rcv_nxt;
- tp->copied_seq = tp->rcv_nxt;
+ WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
inet_csk(sk)->icsk_retransmits = 0;
@@ -3447,9 +3447,9 @@ static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
tcb->end_seq += skb->len;
__skb_header_release(skb);
- sk->sk_wmem_queued += skb->truesize;
+ sk_wmem_queued_add(sk, skb->truesize);
sk_mem_charge(sk, skb->truesize);
- tp->write_seq = tcb->end_seq;
+ WRITE_ONCE(tp->write_seq, tcb->end_seq);
tp->packets_out += tcp_skb_pcount(skb);
}
@@ -3586,11 +3586,11 @@ int tcp_connect(struct sock *sk)
/* We change tp->snd_nxt after the tcp_transmit_skb() call
* in order to make this packet get counted in tcpOutSegs.
*/
- tp->snd_nxt = tp->write_seq;
+ WRITE_ONCE(tp->snd_nxt, tp->write_seq);
tp->pushed_seq = tp->write_seq;
buff = tcp_send_head(sk);
if (unlikely(buff)) {
- tp->snd_nxt = TCP_SKB_CB(buff)->seq;
+ WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);
tp->pushed_seq = TCP_SKB_CB(buff)->seq;
}
TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
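The scattered sk->sk_wmem_queued += ... updates become sk_wmem_queued_add(). The helper presumably just wraps the read-modify-write in WRITE_ONCE() so lockless readers of the queued-bytes counter see untorn values, roughly:

static inline void sk_wmem_queued_add(struct sock *sk, int val)
{
        WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
}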
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 05be564414e9..dd5a6317a801 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -386,15 +386,13 @@ abort: tcp_write_err(sk);
* Timer for Fast Open socket to retransmit SYNACK. Note that the
* sk here is the child socket, not the parent (listener) socket.
*/
-static void tcp_fastopen_synack_timer(struct sock *sk)
+static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
{
struct inet_connection_sock *icsk = inet_csk(sk);
int max_retries = icsk->icsk_syn_retries ? :
sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
struct tcp_sock *tp = tcp_sk(sk);
- struct request_sock *req;
- req = tcp_sk(sk)->fastopen_rsk;
req->rsk_ops->syn_ack_timeout(req);
if (req->num_timeout >= max_retries) {
@@ -435,11 +433,14 @@ void tcp_retransmit_timer(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
struct net *net = sock_net(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
+ struct request_sock *req;
- if (tp->fastopen_rsk) {
+ req = rcu_dereference_protected(tp->fastopen_rsk,
+ lockdep_sock_is_held(sk));
+ if (req) {
WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
sk->sk_state != TCP_FIN_WAIT1);
- tcp_fastopen_synack_timer(sk);
+ tcp_fastopen_synack_timer(sk, req);
/* Before we receive ACK to our SYN-ACK don't retransmit
* anything else (e.g., data or FIN segments).
*/
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index d5779d6a6065..787d9f2a6e99 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -2192,6 +2192,7 @@ static void ip6erspan_tap_setup(struct net_device *dev)
{
ether_setup(dev);
+ dev->max_mtu = 0;
dev->netdev_ops = &ip6erspan_netdev_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = ip6gre_dev_free;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index edadee4a7e76..71827b56c006 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -768,6 +768,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
inet6_sk(skb->sk) : NULL;
struct ip6_frag_state state;
unsigned int mtu, hlen, nexthdr_offset;
+ ktime_t tstamp = skb->tstamp;
int hroom, err = 0;
__be32 frag_id;
u8 *prevhdr, nexthdr = 0;
@@ -855,6 +856,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
if (iter.frag)
ip6_fraglist_prepare(skb, &iter);
+ skb->tstamp = tstamp;
err = output(net, sk, skb);
if (!err)
IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
@@ -913,6 +915,7 @@ slow_path:
/*
* Put this fragment into the sending queue.
*/
+ frag->tstamp = tstamp;
err = output(net, sk, frag);
if (err)
goto fail;
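Both IPv6 fragmentation paths (ip6_fragment() and br_ip6_fragment()) now capture skb->tstamp once and re-apply it to every fragment, since the original transmit timestamp is otherwise lost while the fragment skbs are built. The shape of the fix, as an illustrative sketch with a hypothetical output callback; error handling of remaining fragments is elided:

#include <linux/skbuff.h>

static int output_fragments(struct net *net, struct sock *sk,
                            struct sk_buff *skb,
                            int (*output)(struct net *, struct sock *,
                                          struct sk_buff *))
{
        ktime_t tstamp = skb->tstamp;   /* capture before header surgery */
        int err = 0;

        while (skb) {
                struct sk_buff *next = skb->next;

                skb->next = NULL;
                skb->tstamp = tstamp;   /* restore on each fragment */
                err = output(net, sk, skb);
                if (err)
                        break;
                skb = next;
        }
        return err;
}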
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index a9bff556d3b2..409e79b84a83 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -119,6 +119,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
struct sk_buff *))
{
int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
+ ktime_t tstamp = skb->tstamp;
struct ip6_frag_state state;
u8 *prevhdr, nexthdr = 0;
unsigned int mtu, hlen;
@@ -183,6 +184,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
if (iter.frag)
ip6_fraglist_prepare(skb, &iter);
+ skb->tstamp = tstamp;
err = output(net, sk, data, skb);
if (err || !iter.frag)
break;
@@ -215,6 +217,7 @@ slow_path:
goto blackhole;
}
+ skb2->tstamp = tstamp;
err = output(net, sk, data, skb2);
if (err)
goto blackhole;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e3d9f4559c99..4804b6dc5e65 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -215,7 +215,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
!ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
tp->rx_opt.ts_recent = 0;
tp->rx_opt.ts_recent_stamp = 0;
- tp->write_seq = 0;
+ WRITE_ONCE(tp->write_seq, 0);
}
sk->sk_v6_daddr = usin->sin6_addr;
@@ -311,10 +311,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
if (likely(!tp->repair)) {
if (!tp->write_seq)
- tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
- sk->sk_v6_daddr.s6_addr32,
- inet->inet_sport,
- inet->inet_dport);
+ WRITE_ONCE(tp->write_seq,
+ secure_tcpv6_seq(np->saddr.s6_addr32,
+ sk->sk_v6_daddr.s6_addr32,
+ inet->inet_sport,
+ inet->inet_dport));
tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
np->saddr.s6_addr32,
sk->sk_v6_daddr.s6_addr32);
@@ -406,7 +407,7 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
tp = tcp_sk(sk);
/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
- fastopen = tp->fastopen_rsk;
+ fastopen = rcu_dereference(tp->fastopen_rsk);
snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
if (sk->sk_state != TCP_LISTEN &&
!between(seq, snd_una, tp->snd_nxt)) {
@@ -1895,7 +1896,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
/* Because we don't lock the socket,
* we might find a transient negative value.
*/
- rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
+ rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
+ READ_ONCE(tp->copied_seq), 0);
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
@@ -1906,7 +1908,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp,
state,
- tp->write_seq - tp->snd_una,
+ READ_ONCE(tp->write_seq) - tp->snd_una,
rx_queue,
timer_active,
jiffies_delta_to_clock_t(timer_expires - jiffies),
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 2017b7d780f5..c74f44dfaa22 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -113,22 +113,26 @@ static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr)
*
* Send data via reliable llc2 connection.
* Returns 0 upon success, non-zero if action did not succeed.
+ *
+ * This function always consumes a reference to the skb.
*/
static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock)
{
struct llc_sock* llc = llc_sk(sk);
- int rc = 0;
if (unlikely(llc_data_accept_state(llc->state) ||
llc->remote_busy_flag ||
llc->p_flag)) {
long timeout = sock_sndtimeo(sk, noblock);
+ int rc;
rc = llc_ui_wait_for_busy_core(sk, timeout);
+ if (rc) {
+ kfree_skb(skb);
+ return rc;
+ }
}
- if (unlikely(!rc))
- rc = llc_build_and_send_pkt(sk, skb);
- return rc;
+ return llc_build_and_send_pkt(sk, skb);
}
static void llc_ui_sk_init(struct socket *sock, struct sock *sk)
@@ -899,7 +903,7 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
int flags = msg->msg_flags;
int noblock = flags & MSG_DONTWAIT;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
size_t size = 0;
int rc = -EINVAL, copied = 0, hdrlen;
@@ -908,10 +912,10 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
lock_sock(sk);
if (addr) {
if (msg->msg_namelen < sizeof(*addr))
- goto release;
+ goto out;
} else {
if (llc_ui_addr_null(&llc->addr))
- goto release;
+ goto out;
addr = &llc->addr;
}
/* must bind connection to sap if user hasn't done it. */
@@ -919,7 +923,7 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
/* bind to sap with null dev, exclusive. */
rc = llc_ui_autobind(sock, addr);
if (rc)
- goto release;
+ goto out;
}
hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr);
size = hdrlen + len;
@@ -928,12 +932,12 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
copied = size - hdrlen;
rc = -EINVAL;
if (copied < 0)
- goto release;
+ goto out;
release_sock(sk);
skb = sock_alloc_send_skb(sk, size, noblock, &rc);
lock_sock(sk);
if (!skb)
- goto release;
+ goto out;
skb->dev = llc->dev;
skb->protocol = llc_proto_type(addr->sllc_arphrd);
skb_reserve(skb, hdrlen);
@@ -943,29 +947,31 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
if (sk->sk_type == SOCK_DGRAM || addr->sllc_ua) {
llc_build_and_send_ui_pkt(llc->sap, skb, addr->sllc_mac,
addr->sllc_sap);
+ skb = NULL;
goto out;
}
if (addr->sllc_test) {
llc_build_and_send_test_pkt(llc->sap, skb, addr->sllc_mac,
addr->sllc_sap);
+ skb = NULL;
goto out;
}
if (addr->sllc_xid) {
llc_build_and_send_xid_pkt(llc->sap, skb, addr->sllc_mac,
addr->sllc_sap);
+ skb = NULL;
goto out;
}
rc = -ENOPROTOOPT;
if (!(sk->sk_type == SOCK_STREAM && !addr->sllc_ua))
goto out;
rc = llc_ui_send_data(sk, skb, noblock);
+ skb = NULL;
out:
- if (rc) {
- kfree_skb(skb);
-release:
+ kfree_skb(skb);
+ if (rc)
dprintk("%s: failed sending from %02X to %02X: %d\n",
__func__, llc->laddr.lsap, llc->daddr.lsap, rc);
- }
release_sock(sk);
return rc ? : copied;
}
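The llc_ui_sendmsg() rework settles on a single ownership rule: once the skb is handed to a send helper, the helper consumes the reference even on failure, and the caller clears its pointer. Since kfree_skb(NULL) is a no-op, one cleanup label covers every path. An illustrative caller, assuming that convention:

static int send_one_consuming(struct sock *sk, struct sk_buff *skb,
                              int noblock)
{
        int rc = -EINVAL;

        if (!skb)
                goto out;

        rc = llc_ui_send_data(sk, skb, noblock);
        skb = NULL;             /* helper consumed the reference, even on error */
out:
        kfree_skb(skb);         /* frees only if we never handed it off */
        return rc;
}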
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index 4d78375f9872..647c0554d04c 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -372,6 +372,7 @@ int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb)
llc_pdu_init_as_i_cmd(skb, 1, llc->vS, llc->vR);
rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
if (likely(!rc)) {
+ skb_get(skb);
llc_conn_send_pdu(sk, skb);
llc_conn_ac_inc_vs_by_1(sk, skb);
}
@@ -389,7 +390,8 @@ static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb)
llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
if (likely(!rc)) {
- rc = llc_conn_send_pdu(sk, skb);
+ skb_get(skb);
+ llc_conn_send_pdu(sk, skb);
llc_conn_ac_inc_vs_by_1(sk, skb);
}
return rc;
@@ -406,6 +408,7 @@ int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
if (likely(!rc)) {
+ skb_get(skb);
llc_conn_send_pdu(sk, skb);
llc_conn_ac_inc_vs_by_1(sk, skb);
}
@@ -916,7 +919,8 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk,
llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR);
rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
if (likely(!rc)) {
- rc = llc_conn_send_pdu(sk, skb);
+ skb_get(skb);
+ llc_conn_send_pdu(sk, skb);
llc_conn_ac_inc_vs_by_1(sk, skb);
}
return rc;
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 4ff89cb7c86f..7b620acaca9e 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -30,7 +30,7 @@
#endif
static int llc_find_offset(int state, int ev_type);
-static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *skb);
+static void llc_conn_send_pdus(struct sock *sk);
static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
static int llc_exec_conn_trans_actions(struct sock *sk,
struct llc_conn_state_trans *trans,
@@ -55,6 +55,8 @@ int sysctl_llc2_busy_timeout = LLC2_BUSY_TIME * HZ;
* (executing its actions and changing state), upper layer will be
* indicated or confirmed, if needed. Returns 0 for success, 1 for
* failure. The socket lock has to be held before calling this function.
+ *
+ * This function always consumes a reference to the skb.
*/
int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
{
@@ -62,12 +64,6 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
struct llc_sock *llc = llc_sk(skb->sk);
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
- /*
- * We have to hold the skb, because llc_conn_service will kfree it in
- * the sending path and we need to look at the skb->cb, where we encode
- * llc_conn_state_ev.
- */
- skb_get(skb);
ev->ind_prim = ev->cfm_prim = 0;
/*
* Send event to state machine
@@ -75,21 +71,12 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
rc = llc_conn_service(skb->sk, skb);
if (unlikely(rc != 0)) {
printk(KERN_ERR "%s: llc_conn_service failed\n", __func__);
- goto out_kfree_skb;
- }
-
- if (unlikely(!ev->ind_prim && !ev->cfm_prim)) {
- /* indicate or confirm not required */
- if (!skb->next)
- goto out_kfree_skb;
goto out_skb_put;
}
- if (unlikely(ev->ind_prim && ev->cfm_prim)) /* Paranoia */
- skb_get(skb);
-
switch (ev->ind_prim) {
case LLC_DATA_PRIM:
+ skb_get(skb);
llc_save_primitive(sk, skb, LLC_DATA_PRIM);
if (unlikely(sock_queue_rcv_skb(sk, skb))) {
/*
@@ -106,6 +93,7 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
* skb->sk pointing to the newly created struct sock in
* llc_conn_handler. -acme
*/
+ skb_get(skb);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_state_change(sk);
break;
@@ -121,7 +109,6 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
sk->sk_state_change(sk);
}
}
- kfree_skb(skb);
sock_put(sk);
break;
case LLC_RESET_PRIM:
@@ -130,14 +117,11 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
* RESET is not being notified to upper layers for now
*/
printk(KERN_INFO "%s: received a reset ind!\n", __func__);
- kfree_skb(skb);
break;
default:
- if (ev->ind_prim) {
+ if (ev->ind_prim)
printk(KERN_INFO "%s: received unknown %d prim!\n",
__func__, ev->ind_prim);
- kfree_skb(skb);
- }
/* No indication */
break;
}
@@ -179,25 +163,22 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
printk(KERN_INFO "%s: received a reset conf!\n", __func__);
break;
default:
- if (ev->cfm_prim) {
+ if (ev->cfm_prim)
printk(KERN_INFO "%s: received unknown %d prim!\n",
__func__, ev->cfm_prim);
- break;
- }
- goto out_skb_put; /* No confirmation */
+ /* No confirmation */
+ break;
}
-out_kfree_skb:
- kfree_skb(skb);
out_skb_put:
kfree_skb(skb);
return rc;
}
-int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
+void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
{
/* queue PDU to send to MAC layer */
skb_queue_tail(&sk->sk_write_queue, skb);
- return llc_conn_send_pdus(sk, skb);
+ llc_conn_send_pdus(sk);
}
/**
@@ -255,7 +236,7 @@ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit)
if (howmany_resend > 0)
llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
/* any PDUs to re-send are queued up; start sending to MAC */
- llc_conn_send_pdus(sk, NULL);
+ llc_conn_send_pdus(sk);
out:;
}
@@ -296,7 +277,7 @@ void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit)
if (howmany_resend > 0)
llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
/* any PDUs to re-send are queued up; start sending to MAC */
- llc_conn_send_pdus(sk, NULL);
+ llc_conn_send_pdus(sk);
out:;
}
@@ -340,16 +321,12 @@ out:
/**
* llc_conn_send_pdus - Sends queued PDUs
* @sk: active connection
- * @hold_skb: the skb held by caller, or NULL if does not care
*
- * Sends queued pdus to MAC layer for transmission. When @hold_skb is
- * NULL, always return 0. Otherwise, return 0 if @hold_skb is sent
- * successfully, or 1 for failure.
+ * Sends queued pdus to MAC layer for transmission.
*/
-static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
+static void llc_conn_send_pdus(struct sock *sk)
{
struct sk_buff *skb;
- int ret = 0;
while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
@@ -361,20 +338,10 @@ static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
if (!skb2)
break;
- dev_queue_xmit(skb2);
- } else {
- bool is_target = skb == hold_skb;
- int rc;
-
- if (is_target)
- skb_get(skb);
- rc = dev_queue_xmit(skb);
- if (is_target)
- ret = rc;
+ skb = skb2;
}
+ dev_queue_xmit(skb);
}
-
- return ret;
}
/**
@@ -846,7 +813,7 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
else {
dprintk("%s: adding to backlog...\n", __func__);
llc_set_backlog_type(skb, LLC_PACKET);
- if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
+ if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
goto drop_unlock;
}
out:
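llc_conn_send_pdus() drops its hold_skb plumbing because dev_queue_xmit() always consumes the skb it is given; an action that still needs the skb afterwards (the state-machine event lives in skb->cb) now takes its own reference before queueing. A sketch of the send-action shape, using the names from the hunks above:

static int send_i_cmd_sketch(struct sock *sk, struct sk_buff *skb)
{
        struct llc_sock *llc = llc_sk(sk);
        int rc;

        llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
        rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
        if (likely(!rc)) {
                skb_get(skb);               /* keep a ref across the send */
                llc_conn_send_pdu(sk, skb); /* queues + transmits; consumes one ref */
                llc_conn_ac_inc_vs_by_1(sk, skb); /* safe: we still hold ours */
        }
        return rc;
}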
diff --git a/net/llc/llc_if.c b/net/llc/llc_if.c
index 8db03c2d5440..ad6547736c21 100644
--- a/net/llc/llc_if.c
+++ b/net/llc/llc_if.c
@@ -38,6 +38,8 @@
* closed and -EBUSY when sending data is not permitted in this state or
* LLC has sent an I pdu with p bit set to 1 and is waiting for its
* response.
+ *
+ * This function always consumes a reference to the skb.
*/
int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb)
{
@@ -46,20 +48,22 @@ int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb)
struct llc_sock *llc = llc_sk(sk);
if (unlikely(llc->state == LLC_CONN_STATE_ADM))
- goto out;
+ goto out_free;
rc = -EBUSY;
if (unlikely(llc_data_accept_state(llc->state) || /* data_conn_refuse */
llc->p_flag)) {
llc->failed_data_req = 1;
- goto out;
+ goto out_free;
}
ev = llc_conn_ev(skb);
ev->type = LLC_CONN_EV_TYPE_PRIM;
ev->prim = LLC_DATA_PRIM;
ev->prim_type = LLC_PRIM_TYPE_REQ;
skb->dev = llc->dev;
- rc = llc_conn_state_process(sk, skb);
-out:
+ return llc_conn_state_process(sk, skb);
+
+out_free:
+ kfree_skb(skb);
return rc;
}
diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c
index a94bd56bcac6..7ae4cc684d3a 100644
--- a/net/llc/llc_s_ac.c
+++ b/net/llc/llc_s_ac.c
@@ -58,8 +58,10 @@ int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb)
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_ui_cmd(skb);
rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
- if (likely(!rc))
+ if (likely(!rc)) {
+ skb_get(skb);
rc = dev_queue_xmit(skb);
+ }
return rc;
}
@@ -81,8 +83,10 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
- if (likely(!rc))
+ if (likely(!rc)) {
+ skb_get(skb);
rc = dev_queue_xmit(skb);
+ }
return rc;
}
@@ -135,8 +139,10 @@ int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb)
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_test_cmd(skb);
rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
- if (likely(!rc))
+ if (likely(!rc)) {
+ skb_get(skb);
rc = dev_queue_xmit(skb);
+ }
return rc;
}
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index a7f7b8ff4729..be419062e19a 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -197,29 +197,22 @@ out:
* After executing actions of the event, upper layer will be indicated
* if needed (on receiving a UI frame). sk can be null for the
* datalink_proto case.
+ *
+ * This function always consumes a reference to the skb.
*/
static void llc_sap_state_process(struct llc_sap *sap, struct sk_buff *skb)
{
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
- /*
- * We have to hold the skb, because llc_sap_next_state
- * will kfree it in the sending path and we need to
- * look at the skb->cb, where we encode llc_sap_state_ev.
- */
- skb_get(skb);
ev->ind_cfm_flag = 0;
llc_sap_next_state(sap, skb);
- if (ev->ind_cfm_flag == LLC_IND) {
- if (skb->sk->sk_state == TCP_LISTEN)
- kfree_skb(skb);
- else {
- llc_save_primitive(skb->sk, skb, ev->prim);
- /* queue skb to the user. */
- if (sock_queue_rcv_skb(skb->sk, skb))
- kfree_skb(skb);
- }
+ if (ev->ind_cfm_flag == LLC_IND && skb->sk->sk_state != TCP_LISTEN) {
+ llc_save_primitive(skb->sk, skb, ev->prim);
+
+ /* queue skb to the user. */
+ if (sock_queue_rcv_skb(skb->sk, skb) == 0)
+ return;
}
kfree_skb(skb);
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 26a2f49208b6..54dd8849d1cc 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2633,7 +2633,8 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
rcu_read_lock();
ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID);
- if (WARN_ON_ONCE(ssid == NULL))
+ if (WARN_ONCE(!ssid || ssid[1] > IEEE80211_MAX_SSID_LEN,
+ "invalid SSID element (len=%d)", ssid ? ssid[1] : -1))
ssid_len = 0;
else
ssid_len = ssid[1];
@@ -5233,7 +5234,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
rcu_read_lock();
ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
- if (!ssidie) {
+ if (!ssidie || ssidie[1] > sizeof(assoc_data->ssid)) {
rcu_read_unlock();
kfree(assoc_data);
return -EINVAL;
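Both mlme.c fixes distrust the SSID element's self-declared length before copying it: in an information element, byte 0 is the ID and byte 1 the payload length, so a corrupt or hostile ssid[1] can exceed the destination buffer. A hypothetical bounds-check helper capturing the rule:

#include <linux/ieee80211.h>

/* Hypothetical helper: elem[0] is the element ID, elem[1] the payload
 * length; never trust elem[1] beyond the destination's capacity.
 */
static bool ssid_elem_usable(const u8 *elem, size_t dst_size)
{
        return elem && elem[1] <= IEEE80211_MAX_SSID_LEN &&
               elem[1] <= dst_size;
}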
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 768d14c9a716..0e05ff037672 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3467,9 +3467,18 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
/* process for all: mesh, mlme, ibss */
break;
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ if (is_multicast_ether_addr(mgmt->da) &&
+ !is_broadcast_ether_addr(mgmt->da))
+ return RX_DROP_MONITOR;
+
+ /* process only for station/IBSS */
+ if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+ sdata->vif.type != NL80211_IFTYPE_ADHOC)
+ return RX_DROP_MONITOR;
+ break;
case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
if (is_multicast_ether_addr(mgmt->da) &&
!is_broadcast_ether_addr(mgmt->da))
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index adf94ba1ed77..4d31d9688dc2 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -520,10 +520,33 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
return 0;
}
+static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_sub_if_data *sdata_iter;
+
+ if (!ieee80211_is_radar_required(local))
+ return true;
+
+ if (!regulatory_pre_cac_allowed(local->hw.wiphy))
+ return false;
+
+ mutex_lock(&local->iflist_mtx);
+ list_for_each_entry(sdata_iter, &local->interfaces, list) {
+ if (sdata_iter->wdev.cac_started) {
+ mutex_unlock(&local->iflist_mtx);
+ return false;
+ }
+ }
+ mutex_unlock(&local->iflist_mtx);
+
+ return true;
+}
+
static bool ieee80211_can_scan(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
- if (ieee80211_is_radar_required(local))
+ if (!__ieee80211_can_leave_ch(sdata))
return false;
if (!list_empty(&local->roc_list))
@@ -630,7 +653,10 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
lockdep_assert_held(&local->mtx);
- if (local->scan_req || ieee80211_is_radar_required(local))
+ if (local->scan_req)
+ return -EBUSY;
+
+ if (!__ieee80211_can_leave_ch(sdata))
return -EBUSY;
if (!ieee80211_can_scan(local, sdata)) {
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0c63120b2db2..5cd610b547e0 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1792,8 +1792,8 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
if (nf_ct_is_confirmed(ct))
extra_jiffies += nfct_time_stamp;
- if (ct->timeout != extra_jiffies)
- ct->timeout = extra_jiffies;
+ if (READ_ONCE(ct->timeout) != extra_jiffies)
+ WRITE_ONCE(ct->timeout, extra_jiffies);
acct:
if (do_acct)
nf_ct_acct_update(ct, ctinfo, skb->len);
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 3572e11b6f21..1c77f520f474 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -165,7 +165,8 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
{
int err;
- err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype);
+ err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype,
+ skb->mac_len);
if (err)
return err;
@@ -178,7 +179,7 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
{
int err;
- err = skb_mpls_pop(skb, ethertype);
+ err = skb_mpls_pop(skb, ethertype, skb->mac_len);
if (err)
return err;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 1091bf35a199..ecc17dabec8f 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -556,6 +556,7 @@ struct rxrpc_call {
struct rxrpc_peer *peer; /* Peer record for remote address */
struct rxrpc_sock __rcu *socket; /* socket responsible */
struct rxrpc_net *rxnet; /* Network namespace to which call belongs */
+ const struct rxrpc_security *security; /* applied security module */
struct mutex user_mutex; /* User access mutex */
unsigned long ack_at; /* When deferred ACK needs to happen */
unsigned long ack_lost_at; /* When ACK is figured as lost */
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 00c095d74145..135bf5cd8dd5 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -84,7 +84,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
smp_store_release(&b->conn_backlog_head,
(head + 1) & (size - 1));
- trace_rxrpc_conn(conn, rxrpc_conn_new_service,
+ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
atomic_read(&conn->usage), here);
}
@@ -97,7 +97,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
call->state = RXRPC_CALL_SERVER_PREALLOC;
- trace_rxrpc_call(call, rxrpc_call_new_service,
+ trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
atomic_read(&call->usage),
here, (const void *)user_call_ID);
@@ -307,6 +307,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
rxrpc_see_call(call);
call->conn = conn;
+ call->security = conn->security;
call->peer = rxrpc_get_peer(conn->params.peer);
call->cong_cwnd = call->peer->cong_cwnd;
return call;
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 32d8dc677142..a31c18c09894 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -240,7 +240,8 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
if (p->intr)
__set_bit(RXRPC_CALL_IS_INTR, &call->flags);
call->tx_total_len = p->tx_total_len;
- trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
+ trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
+ atomic_read(&call->usage),
here, (const void *)p->user_call_ID);
/* We need to protect a partially set up call against the user as we
@@ -290,8 +291,8 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
if (ret < 0)
goto error;
- trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
- here, NULL);
+ trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
+ atomic_read(&call->usage), here, NULL);
rxrpc_start_call_timer(call);
@@ -313,8 +314,8 @@ error_dup_user_ID:
error:
__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
RX_CALL_DEAD, ret);
- trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
- here, ERR_PTR(ret));
+ trace_rxrpc_call(call->debug_id, rxrpc_call_error,
+ atomic_read(&call->usage), here, ERR_PTR(ret));
rxrpc_release_call(rx, call);
mutex_unlock(&call->user_mutex);
rxrpc_put_call(call, rxrpc_call_put);
@@ -376,7 +377,8 @@ bool rxrpc_queue_call(struct rxrpc_call *call)
if (n == 0)
return false;
if (rxrpc_queue_work(&call->processor))
- trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
+ trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1,
+ here, NULL);
else
rxrpc_put_call(call, rxrpc_call_put_noqueue);
return true;
@@ -391,7 +393,8 @@ bool __rxrpc_queue_call(struct rxrpc_call *call)
int n = atomic_read(&call->usage);
ASSERTCMP(n, >=, 1);
if (rxrpc_queue_work(&call->processor))
- trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
+ trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n,
+ here, NULL);
else
rxrpc_put_call(call, rxrpc_call_put_noqueue);
return true;
@@ -406,7 +409,8 @@ void rxrpc_see_call(struct rxrpc_call *call)
if (call) {
int n = atomic_read(&call->usage);
- trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
+ trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n,
+ here, NULL);
}
}
@@ -418,7 +422,7 @@ void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
const void *here = __builtin_return_address(0);
int n = atomic_inc_return(&call->usage);
- trace_rxrpc_call(call, op, n, here, NULL);
+ trace_rxrpc_call(call->debug_id, op, n, here, NULL);
}
/*
@@ -445,7 +449,8 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
- trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
+ trace_rxrpc_call(call->debug_id, rxrpc_call_release,
+ atomic_read(&call->usage),
here, (const void *)call->flags);
ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
@@ -488,10 +493,10 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
- if (conn) {
+ if (conn)
rxrpc_disconnect_call(call);
- conn->security->free_call_crypto(call);
- }
+ if (call->security)
+ call->security->free_call_crypto(call);
rxrpc_cleanup_ring(call);
_leave("");
@@ -534,12 +539,13 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
struct rxrpc_net *rxnet = call->rxnet;
const void *here = __builtin_return_address(0);
+ unsigned int debug_id = call->debug_id;
int n;
ASSERT(call != NULL);
n = atomic_dec_return(&call->usage);
- trace_rxrpc_call(call, op, n, here, NULL);
+ trace_rxrpc_call(debug_id, op, n, here, NULL);
ASSERTCMP(n, >=, 0);
if (n == 0) {
_debug("call %d dead", call->debug_id);
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 3f1da1b49f69..376370cd9285 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -212,7 +212,8 @@ rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
rxrpc_get_local(conn->params.local);
key_get(conn->params.key);
- trace_rxrpc_conn(conn, rxrpc_conn_new_client, atomic_read(&conn->usage),
+ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client,
+ atomic_read(&conn->usage),
__builtin_return_address(0));
trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
_leave(" = %p", conn);
@@ -352,6 +353,7 @@ static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
if (cp->exclusive) {
call->conn = candidate;
+ call->security = candidate->security;
call->security_ix = candidate->security_ix;
call->service_id = candidate->service_id;
_leave(" = 0 [exclusive %d]", candidate->debug_id);
@@ -403,6 +405,7 @@ static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
candidate_published:
set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
call->conn = candidate;
+ call->security = candidate->security;
call->security_ix = candidate->security_ix;
call->service_id = candidate->service_id;
spin_unlock(&local->client_conns_lock);
@@ -425,6 +428,7 @@ found_extant_conn:
spin_lock(&conn->channel_lock);
call->conn = conn;
+ call->security = conn->security;
call->security_ix = conn->security_ix;
call->service_id = conn->service_id;
list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
@@ -985,11 +989,12 @@ rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
void rxrpc_put_client_conn(struct rxrpc_connection *conn)
{
const void *here = __builtin_return_address(0);
+ unsigned int debug_id = conn->debug_id;
int n;
do {
n = atomic_dec_return(&conn->usage);
- trace_rxrpc_conn(conn, rxrpc_conn_put_client, n, here);
+ trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here);
if (n > 0)
return;
ASSERTCMP(n, >=, 0);
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index ed05b6922132..38d718e90dc6 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -269,7 +269,7 @@ bool rxrpc_queue_conn(struct rxrpc_connection *conn)
if (n == 0)
return false;
if (rxrpc_queue_work(&conn->processor))
- trace_rxrpc_conn(conn, rxrpc_conn_queued, n + 1, here);
+ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_queued, n + 1, here);
else
rxrpc_put_connection(conn);
return true;
@@ -284,7 +284,7 @@ void rxrpc_see_connection(struct rxrpc_connection *conn)
if (conn) {
int n = atomic_read(&conn->usage);
- trace_rxrpc_conn(conn, rxrpc_conn_seen, n, here);
+ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_seen, n, here);
}
}
@@ -296,7 +296,7 @@ void rxrpc_get_connection(struct rxrpc_connection *conn)
const void *here = __builtin_return_address(0);
int n = atomic_inc_return(&conn->usage);
- trace_rxrpc_conn(conn, rxrpc_conn_got, n, here);
+ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n, here);
}
/*
@@ -310,7 +310,7 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
if (conn) {
int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
if (n > 0)
- trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
+ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n + 1, here);
else
conn = NULL;
}
@@ -333,10 +333,11 @@ static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
void rxrpc_put_service_conn(struct rxrpc_connection *conn)
{
const void *here = __builtin_return_address(0);
+ unsigned int debug_id = conn->debug_id;
int n;
n = atomic_dec_return(&conn->usage);
- trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
+ trace_rxrpc_conn(debug_id, rxrpc_conn_put_service, n, here);
ASSERTCMP(n, >=, 0);
if (n == 1)
rxrpc_set_service_reap_timer(conn->params.local->rxnet,
@@ -420,7 +421,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
*/
if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
continue;
- trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, NULL);
+ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_reap_service, 0, NULL);
if (rxrpc_conn_is_client(conn))
BUG();
diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
index b30e13f6d95f..123d6ceab15c 100644
--- a/net/rxrpc/conn_service.c
+++ b/net/rxrpc/conn_service.c
@@ -134,7 +134,7 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
write_unlock(&rxnet->conn_lock);
- trace_rxrpc_conn(conn, rxrpc_conn_new_service,
+ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
atomic_read(&conn->usage),
__builtin_return_address(0));
}
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index c97ebdc043e4..48f67a9b1037 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -147,10 +147,16 @@ void rxrpc_error_report(struct sock *sk)
{
struct sock_exterr_skb *serr;
struct sockaddr_rxrpc srx;
- struct rxrpc_local *local = sk->sk_user_data;
+ struct rxrpc_local *local;
struct rxrpc_peer *peer;
struct sk_buff *skb;
+ rcu_read_lock();
+ local = rcu_dereference_sk_user_data(sk);
+ if (unlikely(!local)) {
+ rcu_read_unlock();
+ return;
+ }
_enter("%p{%d}", sk, local->debug_id);
/* Clear the outstanding error value on the socket so that it doesn't
@@ -160,6 +166,7 @@ void rxrpc_error_report(struct sock *sk)
skb = sock_dequeue_err_skb(sk);
if (!skb) {
+ rcu_read_unlock();
_leave("UDP socket errqueue empty");
return;
}
@@ -167,11 +174,11 @@ void rxrpc_error_report(struct sock *sk)
serr = SKB_EXT_ERR(skb);
if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
_leave("UDP empty message");
+ rcu_read_unlock();
rxrpc_free_skb(skb, rxrpc_skb_freed);
return;
}
- rcu_read_lock();
peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
if (peer && !rxrpc_get_peer_maybe(peer))
peer = NULL;
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 9c3ac96f71cb..64830d8c1fdb 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -216,7 +216,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
if (peer) {
atomic_set(&peer->usage, 1);
- peer->local = local;
+ peer->local = rxrpc_get_local(local);
INIT_HLIST_HEAD(&peer->error_targets);
peer->service_conns = RB_ROOT;
seqlock_init(&peer->service_conn_lock);
@@ -307,7 +307,6 @@ void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local,
unsigned long hash_key;
hash_key = rxrpc_peer_hash_key(local, &peer->srx);
- peer->local = local;
rxrpc_init_peer(rx, peer, hash_key);
spin_lock(&rxnet->peer_hash_lock);
@@ -382,7 +381,7 @@ struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
int n;
n = atomic_inc_return(&peer->usage);
- trace_rxrpc_peer(peer, rxrpc_peer_got, n, here);
+ trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n, here);
return peer;
}
@@ -396,7 +395,7 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
if (peer) {
int n = atomic_fetch_add_unless(&peer->usage, 1, 0);
if (n > 0)
- trace_rxrpc_peer(peer, rxrpc_peer_got, n + 1, here);
+ trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n + 1, here);
else
peer = NULL;
}
@@ -417,6 +416,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
list_del_init(&peer->keepalive_link);
spin_unlock_bh(&rxnet->peer_hash_lock);
+ rxrpc_put_local(peer->local);
kfree_rcu(peer, rcu);
}
@@ -426,11 +426,13 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
void rxrpc_put_peer(struct rxrpc_peer *peer)
{
const void *here = __builtin_return_address(0);
+ unsigned int debug_id;
int n;
if (peer) {
+ debug_id = peer->debug_id;
n = atomic_dec_return(&peer->usage);
- trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
+ trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here);
if (n == 0)
__rxrpc_put_peer(peer);
}
@@ -443,13 +445,15 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
{
const void *here = __builtin_return_address(0);
+ unsigned int debug_id = peer->debug_id;
int n;
n = atomic_dec_return(&peer->usage);
- trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
+ trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here);
if (n == 0) {
hash_del_rcu(&peer->hash_link);
list_del_init(&peer->keepalive_link);
+ rxrpc_put_local(peer->local);
kfree_rcu(peer, rcu);
}
}
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 3b0becb12041..a4090797c9b2 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -251,8 +251,8 @@ static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
seq += subpacket;
}
- return call->conn->security->verify_packet(call, skb, offset, len,
- seq, cksum);
+ return call->security->verify_packet(call, skb, offset, len,
+ seq, cksum);
}
/*
@@ -291,7 +291,7 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
*_offset = offset;
*_len = len;
- call->conn->security->locate_data(call, skb, _offset, _len);
+ call->security->locate_data(call, skb, _offset, _len);
return 0;
}
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 6a1547b270fe..813fd6888142 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -419,7 +419,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
call->tx_winsize)
sp->hdr.flags |= RXRPC_MORE_PACKETS;
- ret = conn->security->secure_packet(
+ ret = call->security->secure_packet(
call, skb, skb->mark, skb->head);
if (ret < 0)
goto out;
@@ -661,6 +661,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
case RXRPC_CALL_SERVER_PREALLOC:
case RXRPC_CALL_SERVER_SECURING:
case RXRPC_CALL_SERVER_ACCEPTING:
+ rxrpc_put_call(call, rxrpc_call_put);
ret = -EBUSY;
goto error_release_sock;
default:
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 2558f00f6b3e..69d4676a402f 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -832,8 +832,7 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
}
static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
- [TCA_ACT_KIND] = { .type = NLA_NUL_STRING,
- .len = IFNAMSIZ - 1 },
+ [TCA_ACT_KIND] = { .type = NLA_STRING },
[TCA_ACT_INDEX] = { .type = NLA_U32 },
[TCA_ACT_COOKIE] = { .type = NLA_BINARY,
.len = TC_COOKIE_MAX_SIZE },
@@ -865,8 +864,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
NL_SET_ERR_MSG(extack, "TC action kind must be specified");
goto err_out;
}
- nla_strlcpy(act_name, kind, IFNAMSIZ);
-
+ if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) {
+ NL_SET_ERR_MSG(extack, "TC action name too long");
+ goto err_out;
+ }
if (tb[TCA_ACT_COOKIE]) {
cookie = nla_memdup_cookie(tb);
if (!cookie) {
@@ -1352,11 +1353,16 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
struct netlink_ext_ack *extack)
{
size_t attr_size = 0;
- int ret = 0;
+ int loop, ret;
struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
- ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, actions,
- &attr_size, true, extack);
+ for (loop = 0; loop < 10; loop++) {
+ ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
+ actions, &attr_size, true, extack);
+ if (ret != -EAGAIN)
+ break;
+ }
+
if (ret < 0)
return ret;
ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
@@ -1406,11 +1412,8 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
*/
if (n->nlmsg_flags & NLM_F_REPLACE)
ovr = 1;
-replay:
ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
extack);
- if (ret == -EAGAIN)
- goto replay;
break;
case RTM_DELACTION:
ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
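
The act_api change above also bounds the old "goto replay" retry: tcf_action_init() may transiently return -EAGAIN, and the new loop retries at most 10 times instead of spinning indefinitely. A generic sketch of the same bounded-retry shape (the names and the cap are illustrative, mirroring the hunk):

#include <errno.h>

/* Retry an operation that may transiently fail with -EAGAIN, but give
 * up after max_tries so the caller can never loop forever. */
static int retry_bounded(int (*op)(void *), void *arg, int max_tries)
{
	int ret = -EAGAIN;

	for (int i = 0; i < max_tries && ret == -EAGAIN; i++)
		ret = op(arg);

	return ret;	/* may still be -EAGAIN; the caller sees the failure */
}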
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 9ce073a05414..08923b21e566 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -484,7 +484,11 @@ static int __init mirred_init_module(void)
return err;
pr_info("Mirror/redirect action on\n");
- return tcf_register_action(&act_mirred_ops, &mirred_net_ops);
+ err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
+ if (err)
+ unregister_netdevice_notifier(&mirred_device_notifier);
+
+ return err;
}
static void __exit mirred_cleanup_module(void)
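
The mirred fix applies the usual init-time unwinding rule: when the second registration step fails, the notifier registered first must be unregistered before returning the error, or the module would leave a stale notifier behind. A toy sketch of that shape, with stub register/unregister pairs standing in for the real calls:

static int register_step_a(void) { return 0; }	/* stub */
static void unregister_step_a(void) { }		/* stub */
static int register_step_b(void) { return 0; }	/* stub */

/* Two-step init: a failure in step B unwinds step A before returning,
 * so nothing stays half-registered. */
static int demo_init(void)
{
	int err = register_step_a();

	if (err)
		return err;

	err = register_step_b();
	if (err)
		unregister_step_a();

	return err;
}

int main(void) { return demo_init(); }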
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index e168df0e008a..4cf6c553bb0b 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -55,7 +55,7 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
struct tcf_mpls *m = to_mpls(a);
struct tcf_mpls_params *p;
__be32 new_lse;
- int ret;
+ int ret, mac_len;
tcf_lastuse_update(&m->tcf_tm);
bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
@@ -63,8 +63,12 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
/* Ensure 'data' points at mac_header prior to calling mpls manipulating
* functions.
*/
- if (skb_at_tc_ingress(skb))
+ if (skb_at_tc_ingress(skb)) {
skb_push_rcsum(skb, skb->mac_len);
+ mac_len = skb->mac_len;
+ } else {
+ mac_len = skb_network_header(skb) - skb_mac_header(skb);
+ }
ret = READ_ONCE(m->tcf_action);
@@ -72,12 +76,12 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
switch (p->tcfm_action) {
case TCA_MPLS_ACT_POP:
- if (skb_mpls_pop(skb, p->tcfm_proto))
+ if (skb_mpls_pop(skb, p->tcfm_proto, mac_len))
goto drop;
break;
case TCA_MPLS_ACT_PUSH:
new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol));
- if (skb_mpls_push(skb, new_lse, p->tcfm_proto))
+ if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len))
goto drop;
break;
case TCA_MPLS_ACT_MODIFY:
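
At TC ingress, skb->data sits at the network header, so the hunk pushes skb->mac_len back on and uses it directly; at egress the MAC header length is instead derived as the distance between the network and MAC header offsets, which naturally covers any VLAN tags in between. A small self-contained model of that arithmetic (toy struct; in the kernel the offsets come from skb_mac_header()/skb_network_header()):

#include <assert.h>
#include <stddef.h>

/* Toy packet model: byte offsets of the MAC and network headers. */
struct pkt {
	size_t mac_off;
	size_t net_off;
};

static size_t mac_header_len(const struct pkt *p)
{
	/* Egress case in the hunk above: the L2 header length is the
	 * distance between the network and MAC headers. */
	return p->net_off - p->mac_off;
}

int main(void)
{
	struct pkt plain = { .mac_off = 0, .net_off = 14 };
	struct pkt vlan  = { .mac_off = 0, .net_off = 18 };

	assert(mac_header_len(&plain) == 14);	/* bare Ethernet */
	assert(mac_header_len(&vlan) == 18);	/* Ethernet + 802.1Q tag */
	return 0;
}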
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 64584a1df425..8717c0b26c90 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -162,11 +162,22 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
return TC_H_MAJ(first);
}
+static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
+{
+ if (kind)
+ return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
+ memset(name, 0, IFNAMSIZ);
+ return false;
+}
+
static bool tcf_proto_is_unlocked(const char *kind)
{
const struct tcf_proto_ops *ops;
bool ret;
+ if (strlen(kind) == 0)
+ return false;
+
ops = tcf_proto_lookup_ops(kind, false, NULL);
/* On error return false to take rtnl lock. Proto lookup/create
* functions will perform lookup again and properly handle errors.
@@ -1843,6 +1854,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
{
struct net *net = sock_net(skb->sk);
struct nlattr *tca[TCA_MAX + 1];
+ char name[IFNAMSIZ];
struct tcmsg *t;
u32 protocol;
u32 prio;
@@ -1899,13 +1911,19 @@ replay:
if (err)
return err;
+ if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
+ NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
+ err = -EINVAL;
+ goto errout;
+ }
+
/* Take rtnl mutex if rtnl_held was set to true on previous iteration,
* block is shared (no qdisc found), qdisc is not unlocked, classifier
* type is not specified, classifier is not unlocked.
*/
if (rtnl_held ||
(q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
- !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
+ !tcf_proto_is_unlocked(name)) {
rtnl_held = true;
rtnl_lock();
}
@@ -2063,6 +2081,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
{
struct net *net = sock_net(skb->sk);
struct nlattr *tca[TCA_MAX + 1];
+ char name[IFNAMSIZ];
struct tcmsg *t;
u32 protocol;
u32 prio;
@@ -2102,13 +2121,18 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
if (err)
return err;
+ if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
+ NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
+ err = -EINVAL;
+ goto errout;
+ }
/* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
* found), qdisc is not unlocked, classifier type is not specified,
* classifier is not unlocked.
*/
if (!prio ||
(q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
- !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
+ !tcf_proto_is_unlocked(name)) {
rtnl_held = true;
rtnl_lock();
}
@@ -2216,6 +2240,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
{
struct net *net = sock_net(skb->sk);
struct nlattr *tca[TCA_MAX + 1];
+ char name[IFNAMSIZ];
struct tcmsg *t;
u32 protocol;
u32 prio;
@@ -2252,12 +2277,17 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
if (err)
return err;
+ if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
+ NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
+ err = -EINVAL;
+ goto errout;
+ }
/* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
* unlocked, classifier type is not specified, classifier is not
* unlocked.
*/
if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
- !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
+ !tcf_proto_is_unlocked(name)) {
rtnl_held = true;
rtnl_lock();
}
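
tcf_proto_check_kind() canonicalizes the optional TCA_KIND attribute into a fixed IFNAMSIZ buffer and reports truncation as an error, so the later rtnl-locking decision and ops lookup never operate on a silently clipped name. A user-space model of the same contract (snprintf stands in for nla_strlcpy; NAMESZ is an assumed stand-in for IFNAMSIZ):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NAMESZ 16	/* stand-in for IFNAMSIZ */

/* Copy src into name[NAMESZ]; return true (error) if it would not fit,
 * mirroring the ">= IFNAMSIZ" truncation check in the hunk above. */
static bool check_kind(const char *src, char name[NAMESZ])
{
	if (src)
		return snprintf(name, NAMESZ, "%s", src) >= NAMESZ;
	memset(name, 0, NAMESZ);	/* no attribute: empty name */
	return false;
}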
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 82bd14e7ac93..3177dcb17316 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -446,7 +446,7 @@ META_COLLECTOR(int_sk_wmem_queued)
*err = -1;
return;
}
- dst->value = sk->sk_wmem_queued;
+ dst->value = READ_ONCE(sk->sk_wmem_queued);
}
META_COLLECTOR(int_sk_fwd_alloc)
@@ -554,7 +554,7 @@ META_COLLECTOR(int_sk_rcvlowat)
*err = -1;
return;
}
- dst->value = sk->sk_rcvlowat;
+ dst->value = READ_ONCE(sk->sk_rcvlowat);
}
META_COLLECTOR(int_sk_rcvtimeo)
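
sk_wmem_queued and sk_rcvlowat are updated by other contexts while these collectors read them locklessly, so the reads are wrapped in READ_ONCE() to get a single, untorn load the compiler cannot re-fetch. In portable C11 the closest analogue is a relaxed atomic load; a minimal sketch (field names invented):

#include <stdatomic.h>

struct sock_stats {
	_Atomic int wmem_queued;	/* updated concurrently elsewhere */
};

static int read_wmem_once(const struct sock_stats *s)
{
	/* One atomic load, never torn or silently re-read by the
	 * compiler: the C11 analogue of the kernel's READ_ONCE(). */
	return atomic_load_explicit(&s->wmem_queued, memory_order_relaxed);
}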
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 81d58b280612..1047825d9f48 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1390,8 +1390,7 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
}
const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
- [TCA_KIND] = { .type = NLA_NUL_STRING,
- .len = IFNAMSIZ - 1 },
+ [TCA_KIND] = { .type = NLA_STRING },
[TCA_RATE] = { .type = NLA_BINARY,
.len = sizeof(struct tc_estimator) },
[TCA_STAB] = { .type = NLA_NESTED },
diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c
index cebfb65d8556..b1da5589a0c6 100644
--- a/net/sched/sch_etf.c
+++ b/net/sched/sch_etf.c
@@ -177,7 +177,7 @@ static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,
parent = *p;
skb = rb_to_skb(parent);
- if (ktime_after(txtime, skb->tstamp)) {
+ if (ktime_compare(txtime, skb->tstamp) >= 0) {
p = &parent->rb_right;
leftmost = false;
} else {
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 0e44039e729c..42e557d48e4e 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -509,6 +509,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb_checksum_help(skb)) {
qdisc_drop(skb, sch, to_free);
+ skb = NULL;
goto finish_segs;
}
@@ -593,9 +594,10 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
finish_segs:
if (segs) {
unsigned int len, last_len;
- int nb = 0;
+ int nb;
- len = skb->len;
+ len = skb ? skb->len : 0;
+ nb = skb ? 1 : 0;
while (segs) {
skb2 = segs->next;
@@ -612,7 +614,10 @@ finish_segs:
}
segs = skb2;
}
- qdisc_tree_reduce_backlog(sch, -nb, prev_len - len);
+ /* Parent qdiscs accounted for 1 skb of size @prev_len */
+ qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
+ } else if (!skb) {
+ return NET_XMIT_DROP;
}
return NET_XMIT_SUCCESS;
}
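
The netem accounting fix is arithmetic: the parent qdiscs already counted one skb of prev_len bytes, and after GSO segmentation the queue really holds nb skbs totaling len bytes, so the tree must be corrected by the deltas (nb - 1) packets and (len - prev_len) bytes, negated because qdisc_tree_reduce_backlog() subtracts. A worked example with assumed sizes:

#include <assert.h>

int main(void)
{
	/* Assumed example: a 3000-byte GSO skb was already accounted as
	 * 1 packet / 3000 bytes, then segmented into 3 skbs of 1048
	 * bytes each (headers replicated per segment). */
	int prev_len = 3000, nb = 3, len = 3 * 1048;

	int pkt_delta = nb - 1;		 /* 2 extra packets to account */
	int byte_delta = len - prev_len; /* 144 extra header bytes */

	/* The hunk passes the negated deltas, -(nb - 1) and
	 * -(len - prev_len), to the "reduce" helper. */
	assert(pkt_delta == 2 && byte_delta == 144);
	return 0;
}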
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 68b543f85a96..6719a65169d4 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1341,6 +1341,10 @@ static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
goto out;
}
+
+ /* Everything went ok, return success. */
+ err = 0;
+
out:
return err;
}
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index fc9a4c6629ce..0851166b9175 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -175,7 +175,7 @@ static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
- mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
+ mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 5a070fb5b278..2277981559d0 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -243,7 +243,7 @@ int sctp_rcv(struct sk_buff *skb)
bh_lock_sock(sk);
}
- if (sock_owned_by_user(sk)) {
+ if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
if (sctp_add_backlog(sk, skb)) {
bh_unlock_sock(sk);
sctp_chunk_free(chunk);
@@ -321,8 +321,8 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
local_bh_disable();
bh_lock_sock(sk);
- if (sock_owned_by_user(sk)) {
- if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
+ if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
+ if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
sctp_chunk_free(chunk);
else
backloged = 1;
@@ -336,7 +336,13 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
if (backloged)
return 0;
} else {
- sctp_inq_push(inqueue, chunk);
+ if (!sctp_newsk_ready(sk)) {
+ if (!sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
+ return 0;
+ sctp_chunk_free(chunk);
+ } else {
+ sctp_inq_push(inqueue, chunk);
+ }
}
done:
@@ -358,7 +364,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
struct sctp_ep_common *rcvr = chunk->rcvr;
int ret;
- ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
+ ret = sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
if (!ret) {
/* Hold the assoc/ep while hanging on the backlog queue.
* This way, we know structures we need will not disappear
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index e41ed2e0ae7d..48d63956a68c 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2155,7 +2155,7 @@ static enum sctp_ierror sctp_verify_param(struct net *net,
case SCTP_PARAM_SET_PRIMARY:
if (ep->asconf_enable)
break;
- goto fallthrough;
+ goto unhandled;
case SCTP_PARAM_HOST_NAME_ADDRESS:
/* Tell the peer, we won't support this param. */
@@ -2166,11 +2166,11 @@ static enum sctp_ierror sctp_verify_param(struct net *net,
case SCTP_PARAM_FWD_TSN_SUPPORT:
if (ep->prsctp_enable)
break;
- goto fallthrough;
+ goto unhandled;
case SCTP_PARAM_RANDOM:
if (!ep->auth_enable)
- goto fallthrough;
+ goto unhandled;
/* SCTP-AUTH: Section 6.1
* If the random number is not 32 byte long the association
@@ -2187,7 +2187,7 @@ static enum sctp_ierror sctp_verify_param(struct net *net,
case SCTP_PARAM_CHUNKS:
if (!ep->auth_enable)
- goto fallthrough;
+ goto unhandled;
/* SCTP-AUTH: Section 3.2
* The CHUNKS parameter MUST be included once in the INIT or
@@ -2203,7 +2203,7 @@ static enum sctp_ierror sctp_verify_param(struct net *net,
case SCTP_PARAM_HMAC_ALGO:
if (!ep->auth_enable)
- goto fallthrough;
+ goto unhandled;
hmacs = (struct sctp_hmac_algo_param *)param.p;
n_elt = (ntohs(param.p->length) -
@@ -2226,7 +2226,7 @@ static enum sctp_ierror sctp_verify_param(struct net *net,
retval = SCTP_IERROR_ABORT;
}
break;
-fallthrough:
+unhandled:
default:
pr_debug("%s: unrecognized param:%d for chunk:%d\n",
__func__, ntohs(param.p->type), cid);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 939b8d2595bc..5ca0ec0e823c 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -9500,7 +9500,7 @@ struct proto sctp_prot = {
.backlog_rcv = sctp_backlog_rcv,
.hash = sctp_hash,
.unhash = sctp_unhash,
- .get_port = sctp_get_port,
+ .no_autobind = true,
.obj_size = sizeof(struct sctp_sock),
.useroffset = offsetof(struct sctp_sock, subscribe),
.usersize = offsetof(struct sctp_sock, initmsg) -
@@ -9542,7 +9542,7 @@ struct proto sctpv6_prot = {
.backlog_rcv = sctp_backlog_rcv,
.hash = sctp_hash,
.unhash = sctp_unhash,
- .get_port = sctp_get_port,
+ .no_autobind = true,
.obj_size = sizeof(struct sctp6_sock),
.useroffset = offsetof(struct sctp6_sock, sctp.subscribe),
.usersize = offsetof(struct sctp6_sock, sctp.initmsg) -
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 4ca50ddf8d16..88556f0251ab 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -213,7 +213,7 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
if (!lgr) {
rc = SMC_CLC_DECL_MEM;
- goto out;
+ goto ism_put_vlan;
}
lgr->is_smcd = ini->is_smcd;
lgr->sync_err = 0;
@@ -289,6 +289,9 @@ clear_llc_lnk:
smc_llc_link_clear(lnk);
free_lgr:
kfree(lgr);
+ism_put_vlan:
+ if (ini->is_smcd && ini->vlan_id)
+ smc_ism_put_vlan(ini->ism_dev, ini->vlan_id);
out:
if (rc < 0) {
if (rc == -ENOMEM)
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index 413a6abf227e..97e8369002d7 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -211,8 +211,7 @@ int smc_rx_wait(struct smc_sock *smc, long *timeo,
rc = sk_wait_event(sk, timeo,
sk->sk_err ||
sk->sk_shutdown & RCV_SHUTDOWN ||
- fcrit(conn) ||
- smc_cdc_rxed_any_close_or_senddone(conn),
+ fcrit(conn),
&wait);
remove_wait_queue(sk_sleep(sk), &wait);
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
@@ -262,6 +261,18 @@ static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len,
return -EAGAIN;
}
+static bool smc_rx_recvmsg_data_available(struct smc_sock *smc)
+{
+ struct smc_connection *conn = &smc->conn;
+
+ if (smc_rx_data_available(conn))
+ return true;
+ else if (conn->urg_state == SMC_URG_VALID)
+ /* we received a single urgent Byte - skip */
+ smc_rx_update_cons(smc, 0);
+ return false;
+}
+
/* smc_rx_recvmsg - receive data from RMBE
* @msg: copy data to receive buffer
* @pipe: copy data to pipe if set - indicates splice() call
@@ -303,16 +314,18 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
if (read_done >= target || (pipe && read_done))
break;
- if (atomic_read(&conn->bytes_to_rcv))
+ if (smc_rx_recvmsg_data_available(smc))
goto copy;
- else if (conn->urg_state == SMC_URG_VALID)
- /* we received a single urgent Byte - skip */
- smc_rx_update_cons(smc, 0);
if (sk->sk_shutdown & RCV_SHUTDOWN ||
- smc_cdc_rxed_any_close_or_senddone(conn) ||
- conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)
+ conn->local_tx_ctrl.conn_state_flags.peer_conn_abort) {
+ /* smc_cdc_msg_recv_action() could have run after the
+ * smc_rx_recvmsg_data_available() check above
+ */
+ if (smc_rx_recvmsg_data_available(smc))
+ goto copy;
break;
+ }
if (read_done) {
if (sk->sk_err ||
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 9ac88722fa83..70e52f567b2a 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1249,19 +1249,21 @@ static void xs_error_report(struct sock *sk)
{
struct sock_xprt *transport;
struct rpc_xprt *xprt;
- int err;
read_lock_bh(&sk->sk_callback_lock);
if (!(xprt = xprt_from_sock(sk)))
goto out;
transport = container_of(xprt, struct sock_xprt, xprt);
- err = -sk->sk_err;
- if (err == 0)
+ transport->xprt_err = -sk->sk_err;
+ if (transport->xprt_err == 0)
goto out;
dprintk("RPC: xs_error_report client %p, error=%d...\n",
- xprt, -err);
- trace_rpc_socket_error(xprt, sk->sk_socket, err);
+ xprt, -transport->xprt_err);
+ trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err);
+
+ /* barrier ensures xprt_err is set before XPRT_SOCK_WAKE_ERROR */
+ smp_mb__before_atomic();
xs_run_error_worker(transport, XPRT_SOCK_WAKE_ERROR);
out:
read_unlock_bh(&sk->sk_callback_lock);
@@ -2476,7 +2478,6 @@ static void xs_wake_write(struct sock_xprt *transport)
static void xs_wake_error(struct sock_xprt *transport)
{
int sockerr;
- int sockerr_len = sizeof(sockerr);
if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
return;
@@ -2485,9 +2486,7 @@ static void xs_wake_error(struct sock_xprt *transport)
goto out;
if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
goto out;
- if (kernel_getsockopt(transport->sock, SOL_SOCKET, SO_ERROR,
- (char *)&sockerr, &sockerr_len) != 0)
- goto out;
+ sockerr = xchg(&transport->xprt_err, 0);
if (sockerr < 0)
xprt_wake_pending_tasks(&transport->xprt, sockerr);
out:
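
In the sunrpc hunks the error value is published into transport->xprt_err before the XPRT_SOCK_WAKE_ERROR bit is set, with smp_mb__before_atomic() ordering the two, and the worker xchg()s the value back out after observing the bit, so it can never read a stale zero. In C11 the same publish/consume pairing can be expressed with release/acquire atomics; a single-writer sketch, not the sunrpc code:

#include <stdatomic.h>

static int pending_err;			/* payload, single writer */
static atomic_bool wake_flag;		/* publication flag */

static void report_error(int err)
{
	pending_err = err;
	/* Release ordering: the payload store above is visible to any
	 * thread that sees the flag set (the kernel analogue is
	 * smp_mb__before_atomic() before the set_bit()). */
	atomic_store_explicit(&wake_flag, true, memory_order_release);
}

static int consume_error(void)
{
	if (!atomic_exchange_explicit(&wake_flag, false,
				      memory_order_acquire))
		return 0;
	return pending_err;	/* ordered after the flag, never stale */
}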
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3b9f8cc328f5..f8bbc4aab213 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2119,13 +2119,13 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
struct tipc_msg *hdr = buf_msg(skb);
if (unlikely(msg_in_group(hdr)))
- return sk->sk_rcvbuf;
+ return READ_ONCE(sk->sk_rcvbuf);
if (unlikely(!msg_connected(hdr)))
- return sk->sk_rcvbuf << msg_importance(hdr);
+ return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
- return sk->sk_rcvbuf;
+ return READ_ONCE(sk->sk_rcvbuf);
return FLOWCTL_MSG_LIM;
}
@@ -3790,7 +3790,7 @@ int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
- i += scnprintf(buf + i, sz - i, " | %d\n", sk->sk_backlog.len);
+ i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));
if (dqueues & TIPC_DUMP_SK_SNDQ) {
i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index a666ef8fc54e..481f7f8a1655 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -204,10 +204,14 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
return virtio_transport_get_ops()->send_pkt(pkt);
}
-static void virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
+static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
struct virtio_vsock_pkt *pkt)
{
+ if (vvs->rx_bytes + pkt->len > vvs->buf_alloc)
+ return false;
+
vvs->rx_bytes += pkt->len;
+ return true;
}
static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
@@ -458,6 +462,9 @@ void virtio_transport_set_buffer_size(struct vsock_sock *vsk, u64 val)
vvs->buf_size_max = val;
vvs->buf_size = val;
vvs->buf_alloc = val;
+
+ virtio_transport_send_credit_update(vsk, VIRTIO_VSOCK_TYPE_STREAM,
+ NULL);
}
EXPORT_SYMBOL_GPL(virtio_transport_set_buffer_size);
@@ -876,14 +883,18 @@ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
struct virtio_vsock_pkt *pkt)
{
struct virtio_vsock_sock *vvs = vsk->trans;
- bool free_pkt = false;
+ bool can_enqueue, free_pkt = false;
pkt->len = le32_to_cpu(pkt->hdr.len);
pkt->off = 0;
spin_lock_bh(&vvs->rx_lock);
- virtio_transport_inc_rx_pkt(vvs, pkt);
+ can_enqueue = virtio_transport_inc_rx_pkt(vvs, pkt);
+ if (!can_enqueue) {
+ free_pkt = true;
+ goto out;
+ }
/* Try to copy small packets into the buffer of last packet queued,
* to avoid wasting memory queueing the entire buffer with a small
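
virtio_transport_inc_rx_pkt() now enforces the credit the socket advertised: a packet is charged (and queued) only if it still fits into buf_alloc, which is what prevents an unthrottled sender from growing receive memory without bound. A toy model of that credit check (field names follow the hunk; the surrounding context is invented):

#include <stdbool.h>
#include <stdint.h>

struct rx_credit {
	uint32_t rx_bytes;	/* currently queued */
	uint32_t buf_alloc;	/* credit advertised to the peer */
};

/* Return false (and queue nothing) if the packet would exceed the
 * advertised buffer; the caller then frees the packet immediately. */
static bool rx_charge(struct rx_credit *c, uint32_t pkt_len)
{
	if (c->rx_bytes + pkt_len > c->buf_alloc)
		return false;
	c->rx_bytes += pkt_len;
	return true;
}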
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 141cdb171665..4453dd375de9 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -13682,7 +13682,7 @@ static int nl80211_get_ftm_responder_stats(struct sk_buff *skb,
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
NL80211_CMD_GET_FTM_RESPONDER_STATS);
if (!hdr)
- return -ENOBUFS;
+ goto nla_put_failure;
if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 420c4207ab59..446c76d44e65 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -3883,6 +3883,7 @@ bool regulatory_pre_cac_allowed(struct wiphy *wiphy)
return pre_cac_allowed;
}
+EXPORT_SYMBOL(regulatory_pre_cac_allowed);
void regulatory_propagate_dfs_state(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef,
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index 504133d76de4..dc8f689bd469 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -156,14 +156,6 @@ bool regulatory_indoor_allowed(void);
#define REG_PRE_CAC_EXPIRY_GRACE_MS 2000
/**
- * regulatory_pre_cac_allowed - if pre-CAC allowed in the current dfs domain
- * @wiphy: wiphy for which pre-CAC capability is checked.
-
- * Pre-CAC is allowed only in ETSI domain.
- */
-bool regulatory_pre_cac_allowed(struct wiphy *wiphy);
-
-/**
* regulatory_propagate_dfs_state - Propagate DFS channel state to other wiphys
* @wiphy - wiphy on which radar is detected and the event will be propagated
* to other available wiphys having the same DFS domain
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index ff1016607f0b..aef240fdf8df 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1703,8 +1703,7 @@ cfg80211_parse_mbssid_frame_data(struct wiphy *wiphy,
static void
cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
struct cfg80211_bss *nontrans_bss,
- struct ieee80211_mgmt *mgmt, size_t len,
- gfp_t gfp)
+ struct ieee80211_mgmt *mgmt, size_t len)
{
u8 *ie, *new_ie, *pos;
const u8 *nontrans_ssid, *trans_ssid, *mbssid;
@@ -1715,6 +1714,8 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
const struct cfg80211_bss_ies *old;
u8 cpy_len;
+ lockdep_assert_held(&wiphy_to_rdev(wiphy)->bss_lock);
+
ie = mgmt->u.probe_resp.variable;
new_ie_len = ielen;
@@ -1731,23 +1732,22 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
if (!mbssid || mbssid < trans_ssid)
return;
new_ie_len -= mbssid[1];
- rcu_read_lock();
+
nontrans_ssid = ieee80211_bss_get_ie(nontrans_bss, WLAN_EID_SSID);
- if (!nontrans_ssid) {
- rcu_read_unlock();
+ if (!nontrans_ssid)
return;
- }
+
new_ie_len += nontrans_ssid[1];
- rcu_read_unlock();
/* generate new ie for nontrans BSS
* 1. replace SSID with nontrans BSS' SSID
* 2. skip MBSSID IE
*/
- new_ie = kzalloc(new_ie_len, gfp);
+ new_ie = kzalloc(new_ie_len, GFP_ATOMIC);
if (!new_ie)
return;
- new_ies = kzalloc(sizeof(*new_ies) + new_ie_len, gfp);
+
+ new_ies = kzalloc(sizeof(*new_ies) + new_ie_len, GFP_ATOMIC);
if (!new_ies)
goto out_free;
@@ -1901,6 +1901,8 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
cfg80211_parse_mbssid_frame_data(wiphy, data, mgmt, len,
&non_tx_data, gfp);
+ spin_lock_bh(&wiphy_to_rdev(wiphy)->bss_lock);
+
/* check if the res has other nontransmitting bss which is not
* in MBSSID IE
*/
@@ -1915,8 +1917,9 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
ies2 = rcu_access_pointer(tmp_bss->ies);
if (ies2->tsf < ies1->tsf)
cfg80211_update_notlisted_nontrans(wiphy, tmp_bss,
- mgmt, len, gfp);
+ mgmt, len);
}
+ spin_unlock_bh(&wiphy_to_rdev(wiphy)->bss_lock);
return res;
}
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index c67d7a82ab13..73fd0eae08ca 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -202,6 +202,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
struct iw_point *data, char *ssid)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int ret = 0;
/* call only for station! */
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
@@ -219,7 +220,10 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
if (ie) {
data->flags = 1;
data->length = ie[1];
- memcpy(ssid, ie + 2, data->length);
+ if (data->length > IW_ESSID_MAX_SIZE)
+ ret = -EINVAL;
+ else
+ memcpy(ssid, ie + 2, data->length);
}
rcu_read_unlock();
} else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) {
@@ -229,7 +233,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
}
wdev_unlock(wdev);
- return 0;
+ return ret;
}
int cfg80211_mgd_wext_siwap(struct net_device *dev,
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index 5c111bc3c8ea..00e782335cb0 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -55,7 +55,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
if (!sock_owned_by_user(sk)) {
queued = x25_process_rx_frame(sk, skb);
} else {
- queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf);
+ queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
}
bh_unlock_sock(sk);
sock_put(sk);
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index fa8fbb8fa3c8..9044073fbf22 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -305,9 +305,8 @@ out:
}
EXPORT_SYMBOL(xsk_umem_consume_tx);
-static int xsk_zc_xmit(struct sock *sk)
+static int xsk_zc_xmit(struct xdp_sock *xs)
{
- struct xdp_sock *xs = xdp_sk(sk);
struct net_device *dev = xs->dev;
return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
@@ -327,11 +326,10 @@ static void xsk_destruct_skb(struct sk_buff *skb)
sock_wfree(skb);
}
-static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
- size_t total_len)
+static int xsk_generic_xmit(struct sock *sk)
{
- u32 max_batch = TX_BATCH_SIZE;
struct xdp_sock *xs = xdp_sk(sk);
+ u32 max_batch = TX_BATCH_SIZE;
bool sent_frame = false;
struct xdp_desc desc;
struct sk_buff *skb;
@@ -394,6 +392,18 @@ out:
return err;
}
+static int __xsk_sendmsg(struct sock *sk)
+{
+ struct xdp_sock *xs = xdp_sk(sk);
+
+ if (unlikely(!(xs->dev->flags & IFF_UP)))
+ return -ENETDOWN;
+ if (unlikely(!xs->tx))
+ return -ENOBUFS;
+
+ return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
+}
+
static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
@@ -402,21 +412,18 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
if (unlikely(!xsk_is_bound(xs)))
return -ENXIO;
- if (unlikely(!(xs->dev->flags & IFF_UP)))
- return -ENETDOWN;
- if (unlikely(!xs->tx))
- return -ENOBUFS;
- if (need_wait)
+ if (unlikely(need_wait))
return -EOPNOTSUPP;
- return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
+ return __xsk_sendmsg(sk);
}
static unsigned int xsk_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait)
{
unsigned int mask = datagram_poll(file, sock, wait);
- struct xdp_sock *xs = xdp_sk(sock->sk);
+ struct sock *sk = sock->sk;
+ struct xdp_sock *xs = xdp_sk(sk);
struct net_device *dev;
struct xdp_umem *umem;
@@ -426,9 +433,14 @@ static unsigned int xsk_poll(struct file *file, struct socket *sock,
dev = xs->dev;
umem = xs->umem;
- if (umem->need_wakeup)
- dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
- umem->need_wakeup);
+ if (umem->need_wakeup) {
+ if (dev->netdev_ops->ndo_xsk_wakeup)
+ dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
+ umem->need_wakeup);
+ else
+ /* Poll needs to drive Tx also in copy mode */
+ __xsk_sendmsg(sk);
+ }
if (xs->rx && !xskq_empty_desc(xs->rx))
mask |= POLLIN | POLLRDNORM;
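
Factoring __xsk_sendmsg() out lets xsk_poll() reuse it: when the driver provides no ndo_xsk_wakeup hook (copy mode), poll itself drives the transmit path instead of doing nothing. A sketch of that dispatch shape with hypothetical hooks:

#include <stddef.h>

struct dev_ops {
	/* NULL when the device has no native wakeup (copy mode). */
	void (*wakeup)(void *dev);
};

static void do_generic_xmit(void *dev)
{
	(void)dev;	/* drive TX in software; placeholder */
}

/* Poll-time kick: prefer the driver hook, else fall back to driving
 * the generic transmit path ourselves, as the hunk above does. */
static void poll_kick(const struct dev_ops *ops, void *dev)
{
	if (ops->wakeup)
		ops->wakeup(dev);
	else
		do_generic_xmit(dev);
}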
diff --git a/samples/bpf/asm_goto_workaround.h b/samples/bpf/asm_goto_workaround.h
index 7409722727ca..7048bb3594d6 100644
--- a/samples/bpf/asm_goto_workaround.h
+++ b/samples/bpf/asm_goto_workaround.h
@@ -3,7 +3,8 @@
#ifndef __ASM_GOTO_WORKAROUND_H
#define __ASM_GOTO_WORKAROUND_H
-/* this will bring in asm_volatile_goto macro definition
+/*
+ * This will bring in asm_volatile_goto and asm_inline macro definitions
* if enabled by compiler and config options.
*/
#include <linux/types.h>
@@ -13,5 +14,15 @@
#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
#endif
+/*
+ * asm_inline is defined as asm __inline in "include/linux/compiler_types.h"
+ * if supported by the kernel's compiler (i.e. CONFIG_CC_HAS_ASM_INLINE),
+ * which is not supported by clang.
+ */
+#ifdef asm_inline
+#undef asm_inline
+#define asm_inline asm
+#endif
+
#define volatile(x...) volatile("")
#endif
diff --git a/samples/bpf/task_fd_query_user.c b/samples/bpf/task_fd_query_user.c
index e39938058223..4c31b305e6ef 100644
--- a/samples/bpf/task_fd_query_user.c
+++ b/samples/bpf/task_fd_query_user.c
@@ -13,6 +13,7 @@
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <linux/perf_event.h>
#include "libbpf.h"
#include "bpf_load.h"
diff --git a/scripts/coccinelle/api/devm_platform_ioremap_resource.cocci b/scripts/coccinelle/api/devm_platform_ioremap_resource.cocci
deleted file mode 100644
index 56a2e261d61d..000000000000
--- a/scripts/coccinelle/api/devm_platform_ioremap_resource.cocci
+++ /dev/null
@@ -1,60 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/// Use devm_platform_ioremap_resource helper which wraps
-/// platform_get_resource() and devm_ioremap_resource() together.
-///
-// Confidence: High
-// Copyright: (C) 2019 Himanshu Jha GPLv2.
-// Copyright: (C) 2019 Julia Lawall, Inria/LIP6. GPLv2.
-// Keywords: platform_get_resource, devm_ioremap_resource,
-// Keywords: devm_platform_ioremap_resource
-
-virtual patch
-virtual report
-
-@r depends on patch && !report@
-expression e1, e2, arg1, arg2, arg3;
-identifier id;
-@@
-
-(
-- id = platform_get_resource(arg1, IORESOURCE_MEM, arg2);
-|
-- struct resource *id = platform_get_resource(arg1, IORESOURCE_MEM, arg2);
-)
- ... when != id
-- e1 = devm_ioremap_resource(arg3, id);
-+ e1 = devm_platform_ioremap_resource(arg1, arg2);
- ... when != id
-? id = e2
-
-@r1 depends on patch && !report@
-identifier r.id;
-type T;
-@@
-
-- T *id;
- ...when != id
-
-@r2 depends on report && !patch@
-identifier id;
-expression e1, e2, arg1, arg2, arg3;
-position j0;
-@@
-
-(
- id = platform_get_resource(arg1, IORESOURCE_MEM, arg2);
-|
- struct resource *id = platform_get_resource(arg1, IORESOURCE_MEM, arg2);
-)
- ... when != id
- e1@j0 = devm_ioremap_resource(arg3, id);
- ... when != id
-? id = e2
-
-@script:python depends on report && !patch@
-e1 << r2.e1;
-j0 << r2.j0;
-@@
-
-msg = "WARNING: Use devm_platform_ioremap_resource for %s" % (e1)
-coccilib.report.print_report(j0[0], msg)
diff --git a/scripts/coccinelle/misc/add_namespace.cocci b/scripts/coccinelle/misc/add_namespace.cocci
index c832bb6445a8..99e93a6c2e24 100644
--- a/scripts/coccinelle/misc/add_namespace.cocci
+++ b/scripts/coccinelle/misc/add_namespace.cocci
@@ -6,6 +6,8 @@
/// add a missing namespace tag to a module source file.
///
+virtual report
+
@has_ns_import@
declarer name MODULE_IMPORT_NS;
identifier virtual.ns;
diff --git a/scripts/gdb/linux/dmesg.py b/scripts/gdb/linux/dmesg.py
index 6d2e09a2ad2f..2fa7bb83885f 100644
--- a/scripts/gdb/linux/dmesg.py
+++ b/scripts/gdb/linux/dmesg.py
@@ -16,6 +16,8 @@ import sys
from linux import utils
+printk_log_type = utils.CachedType("struct printk_log")
+
class LxDmesg(gdb.Command):
"""Print Linux kernel log buffer."""
@@ -42,9 +44,14 @@ class LxDmesg(gdb.Command):
b = utils.read_memoryview(inf, log_buf_addr, log_next_idx)
log_buf = a.tobytes() + b.tobytes()
+ length_offset = printk_log_type.get_type()['len'].bitpos // 8
+ text_len_offset = printk_log_type.get_type()['text_len'].bitpos // 8
+ time_stamp_offset = printk_log_type.get_type()['ts_nsec'].bitpos // 8
+ text_offset = printk_log_type.get_type().sizeof
+
pos = 0
while pos < log_buf.__len__():
- length = utils.read_u16(log_buf[pos + 8:pos + 10])
+ length = utils.read_u16(log_buf, pos + length_offset)
if length == 0:
if log_buf_2nd_half == -1:
gdb.write("Corrupted log buffer!\n")
@@ -52,10 +59,11 @@ class LxDmesg(gdb.Command):
pos = log_buf_2nd_half
continue
- text_len = utils.read_u16(log_buf[pos + 10:pos + 12])
- text = log_buf[pos + 16:pos + 16 + text_len].decode(
+ text_len = utils.read_u16(log_buf, pos + text_len_offset)
+ text_start = pos + text_offset
+ text = log_buf[text_start:text_start + text_len].decode(
encoding='utf8', errors='replace')
- time_stamp = utils.read_u64(log_buf[pos:pos + 8])
+ time_stamp = utils.read_u64(log_buf, pos + time_stamp_offset)
for line in text.splitlines():
msg = u"[{time:12.6f}] {line}\n".format(
diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
index 34e40e96dee2..7b7c2fafbc68 100644
--- a/scripts/gdb/linux/symbols.py
+++ b/scripts/gdb/linux/symbols.py
@@ -15,7 +15,7 @@ import gdb
import os
import re
-from linux import modules
+from linux import modules, utils
if hasattr(gdb, 'Breakpoint'):
@@ -116,6 +116,12 @@ lx-symbols command."""
module_file = self._get_module_file(module_name)
if module_file:
+ if utils.is_target_arch('s390'):
+ # Module text is preceded by PLT stubs on s390.
+ module_arch = module['arch']
+ plt_offset = int(module_arch['plt_offset'])
+ plt_size = int(module_arch['plt_size'])
+ module_addr = hex(int(module_addr, 0) + plt_offset + plt_size)
gdb.write("loading @{addr}: {filename}\n".format(
addr=module_addr, filename=module_file))
cmdline = "add-symbol-file {filename} {addr}{sections}".format(
diff --git a/scripts/gdb/linux/utils.py b/scripts/gdb/linux/utils.py
index bc67126118c4..ea94221dbd39 100644
--- a/scripts/gdb/linux/utils.py
+++ b/scripts/gdb/linux/utils.py
@@ -92,15 +92,16 @@ def read_memoryview(inf, start, length):
return memoryview(inf.read_memory(start, length))
-def read_u16(buffer):
+def read_u16(buffer, offset):
+ buffer_val = buffer[offset:offset + 2]
value = [0, 0]
- if type(buffer[0]) is str:
- value[0] = ord(buffer[0])
- value[1] = ord(buffer[1])
+ if type(buffer_val[0]) is str:
+ value[0] = ord(buffer_val[0])
+ value[1] = ord(buffer_val[1])
else:
- value[0] = buffer[0]
- value[1] = buffer[1]
+ value[0] = buffer_val[0]
+ value[1] = buffer_val[1]
if get_target_endianness() == LITTLE_ENDIAN:
return value[0] + (value[1] << 8)
@@ -108,18 +109,18 @@ def read_u16(buffer):
return value[1] + (value[0] << 8)
-def read_u32(buffer):
+def read_u32(buffer, offset):
if get_target_endianness() == LITTLE_ENDIAN:
- return read_u16(buffer[0:2]) + (read_u16(buffer[2:4]) << 16)
+ return read_u16(buffer, offset) + (read_u16(buffer, offset + 2) << 16)
else:
- return read_u16(buffer[2:4]) + (read_u16(buffer[0:2]) << 16)
+ return read_u16(buffer, offset + 2) + (read_u16(buffer, offset) << 16)
-def read_u64(buffer):
+def read_u64(buffer, offset):
if get_target_endianness() == LITTLE_ENDIAN:
- return read_u32(buffer[0:4]) + (read_u32(buffer[4:8]) << 32)
+ return read_u32(buffer, offset) + (read_u32(buffer, offset + 4) << 32)
else:
- return read_u32(buffer[4:8]) + (read_u32(buffer[0:4]) << 32)
+ return read_u32(buffer, offset + 4) + (read_u32(buffer, offset) << 32)
target_arch = None
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 442d5e2ad688..936d3ad23c83 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -166,7 +166,7 @@ struct symbol {
struct module *module;
unsigned int crc;
int crc_valid;
- const char *namespace;
+ char *namespace;
unsigned int weak:1;
unsigned int vmlinux:1; /* 1 if symbol is defined in vmlinux */
unsigned int kernel:1; /* 1 if symbol is from kernel
@@ -348,20 +348,18 @@ static enum export export_from_sec(struct elf_info *elf, unsigned int sec)
return export_unknown;
}
-static const char *sym_extract_namespace(const char **symname)
+static char *sym_extract_namespace(const char **symname)
{
- size_t n;
- char *dupsymname;
+ char *namespace = NULL;
+ char *ns_separator;
- n = strcspn(*symname, ".");
- if (n < strlen(*symname) - 1) {
- dupsymname = NOFAIL(strdup(*symname));
- dupsymname[n] = '\0';
- *symname = dupsymname;
- return dupsymname + n + 1;
+ ns_separator = strchr(*symname, '.');
+ if (ns_separator) {
+ namespace = NOFAIL(strndup(*symname, ns_separator - *symname));
+ *symname = ns_separator + 1;
}
- return NULL;
+ return namespace;
}
/**
@@ -375,7 +373,6 @@ static struct symbol *sym_add_exported(const char *name, const char *namespace,
if (!s) {
s = new_symbol(name, mod, export);
- s->namespace = namespace;
} else {
if (!s->preloaded) {
warn("%s: '%s' exported twice. Previous export was in %s%s\n",
@@ -386,6 +383,8 @@ static struct symbol *sym_add_exported(const char *name, const char *namespace,
s->module = mod;
}
}
+ free(s->namespace);
+ s->namespace = namespace ? strdup(namespace) : NULL;
s->preloaded = 0;
s->vmlinux = is_vmlinux(mod->name);
s->kernel = 0;
@@ -672,7 +671,8 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
unsigned int crc;
enum export export;
bool is_crc = false;
- const char *name, *namespace;
+ const char *name;
+ char *namespace;
if ((!is_vmlinux(mod->name) || mod->is_dot_o) &&
strstarts(symname, "__ksymtab"))
@@ -747,6 +747,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
name = symname + strlen("__ksymtab_");
namespace = sym_extract_namespace(&name);
sym_add_exported(name, namespace, mod, export);
+ free(namespace);
}
if (strcmp(symname, "init_module") == 0)
mod->has_init = 1;
@@ -2195,7 +2196,7 @@ static int check_exports(struct module *mod)
else
basename = mod->name;
- if (exp->namespace) {
+ if (exp->namespace && exp->namespace[0]) {
add_namespace(&mod->required_namespaces,
exp->namespace);
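
sym_extract_namespace() now splits "NAMESPACE.symbol" at the first dot with strndup(), returning an allocation the caller owns, which is why handle_modversions() gained the free(namespace) and sym_add_exported() frees any previous value. A standalone sketch of the split (uses POSIX strndup()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Split "ns.symbol": return a malloc'd copy of "ns" (caller frees)
 * and advance *symname past the dot; return NULL if there is no dot. */
static char *extract_namespace(const char **symname)
{
	const char *sep = strchr(*symname, '.');
	char *ns = NULL;

	if (sep) {
		ns = strndup(*symname, (size_t)(sep - *symname));
		*symname = sep + 1;
	}
	return ns;
}

int main(void)
{
	const char *sym = "MY_NS.my_symbol";
	char *ns = extract_namespace(&sym);

	printf("ns=%s sym=%s\n", ns ? ns : "(none)", sym);
	free(ns);
	return 0;
}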
diff --git a/scripts/nsdeps b/scripts/nsdeps
index ac2b6031dd13..3754dac13b31 100644
--- a/scripts/nsdeps
+++ b/scripts/nsdeps
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# Linux kernel symbol namespace import generator
#
@@ -41,7 +41,7 @@ generate_deps() {
for source_file in $mod_source_files; do
sed '/MODULE_IMPORT_NS/Q' $source_file > ${source_file}.tmp
offset=$(wc -l ${source_file}.tmp | awk '{print $1;}')
- cat $source_file | grep MODULE_IMPORT_NS | sort -u >> ${source_file}.tmp
+ cat $source_file | grep MODULE_IMPORT_NS | LANG=C sort -u >> ${source_file}.tmp
tail -n +$((offset +1)) ${source_file} | grep -v MODULE_IMPORT_NS >> ${source_file}.tmp
if ! diff -q ${source_file} ${source_file}.tmp; then
mv ${source_file}.tmp ${source_file}
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
index 8f0a278ce0af..74eab03e31d4 100644
--- a/scripts/recordmcount.h
+++ b/scripts/recordmcount.h
@@ -389,11 +389,8 @@ static int nop_mcount(Elf_Shdr const *const relhdr,
mcountsym = get_mcountsym(sym0, relp, str0);
if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) {
- if (make_nop) {
+ if (make_nop)
ret = make_nop((void *)ehdr, _w(shdr->sh_offset) + _w(relp->r_offset));
- if (ret < 0)
- return -1;
- }
if (warn_on_notrace_sect && !once) {
printf("Section %s has mcount callers being ignored\n",
txtname);
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 220dae0db3f1..a2998b118ef9 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -93,7 +93,7 @@ scm_version()
# Check for mercurial and a mercurial repo.
if test -d .hg && hgid=`hg id 2>/dev/null`; then
# Do we have a tagged version? If so, latesttagdistance == 1
- if [ "`hg log -r . --template '{latesttagdistance}'`" == "1" ]; then
+ if [ "`hg log -r . --template '{latesttagdistance}'`" = "1" ]; then
id=`hg log -r . --template '{latesttag}'`
printf '%s%s' -hg "$id"
else
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 725674f3276d..7d0f8f7431ff 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -801,8 +801,8 @@ struct cgroup_subsys devices_cgrp_subsys = {
*
* returns 0 on success, -EPERM case the operation is not permitted
*/
-int __devcgroup_check_permission(short type, u32 major, u32 minor,
- short access)
+static int __devcgroup_check_permission(short type, u32 major, u32 minor,
+ short access)
{
struct dev_cgroup *dev_cgroup;
bool rc;
@@ -824,3 +824,14 @@ int __devcgroup_check_permission(short type, u32 major, u32 minor,
return 0;
}
+
+int devcgroup_check_permission(short type, u32 major, u32 minor, short access)
+{
+ int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access);
+
+ if (rc)
+ return -EPERM;
+
+ return __devcgroup_check_permission(type, major, minor, access);
+}
+EXPORT_SYMBOL(devcgroup_check_permission);
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 3a29e7c24ba9..a5813c7629c1 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1946,7 +1946,14 @@ static int convert_context(struct context *oldc, struct context *newc, void *p)
rc = string_to_context_struct(args->newp, NULL, s,
newc, SECSID_NULL);
if (rc == -EINVAL) {
- /* Retain string representation for later mapping. */
+ /*
+ * Retain string representation for later mapping.
+ *
+ * IMPORTANT: We need to copy the contents of oldc->str
+ * back into s again because string_to_context_struct()
+ * may have garbled it.
+ */
+ memcpy(s, oldc->str, oldc->len);
context_init(newc);
newc->str = s;
newc->len = oldc->len;
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index 211ca85acd8c..cfab60d88c92 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -271,6 +271,11 @@ int snd_hdac_ext_bus_link_get(struct hdac_bus *bus,
ret = snd_hdac_ext_bus_link_power_up(link);
/*
+ * clear the register to invalidate all the output streams
+ */
+ snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV,
+ ML_LOSIDV_STREAM_MASK, 0);
+ /*
* wait for 521usec for codec to report status
* HDA spec section 4.3 - Codec Discovery
*/
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index bca5de78e9ad..795cbda32cbb 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3474,6 +3474,8 @@ static int patch_nvhdmi(struct hda_codec *codec)
nvhdmi_chmap_cea_alloc_validate_get_type;
spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
+ codec->link_down_at_suspend = 1;
+
generic_acomp_init(codec, &nvhdmi_audio_ops, nvhdmi_port2pin);
return 0;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b000b36ac3c6..ce4f11659765 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5358,6 +5358,17 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec,
}
}
+static void alc256_fixup_dell_xps_13_headphone_noise2(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ if (action != HDA_FIXUP_ACT_PRE_PROBE)
+ return;
+
+ snd_hda_codec_amp_stereo(codec, 0x1a, HDA_INPUT, 0, HDA_AMP_VOLMASK, 1);
+ snd_hda_override_wcaps(codec, 0x1a, get_wcaps(codec, 0x1a) & ~AC_WCAP_IN_AMP);
+}
+
static void alc269_fixup_limit_int_mic_boost(struct hda_codec *codec,
const struct hda_fixup *fix,
int action)
@@ -5822,6 +5833,7 @@ enum {
ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
ALC275_FIXUP_DELL_XPS,
ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
+ ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2,
ALC293_FIXUP_LENOVO_SPK_NOISE,
ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
ALC255_FIXUP_DELL_SPK_NOISE,
@@ -5869,6 +5881,7 @@ enum {
ALC225_FIXUP_WYSE_AUTO_MUTE,
ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
+ ALC256_FIXUP_ASUS_HEADSET_MIC,
ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
ALC299_FIXUP_PREDATOR_SPK,
ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
@@ -6558,6 +6571,12 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
},
+ [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc256_fixup_dell_xps_13_headphone_noise2,
+ .chained = true,
+ .chain_id = ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE
+ },
[ALC293_FIXUP_LENOVO_SPK_NOISE] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_disable_aamix,
@@ -6912,6 +6931,15 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
},
+ [ALC256_FIXUP_ASUS_HEADSET_MIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x03a11020 }, /* headset mic with jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+ },
[ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -7001,17 +7029,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
- SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
- SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
@@ -7108,6 +7136,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 33cd26763c0e..ff5ab24f3bd1 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -348,6 +348,9 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
ep = 0x84;
ifnum = 0;
goto add_sync_ep_from_ifnum;
+ case USB_ID(0x0582, 0x01d8): /* BOSS Katana */
+ /* BOSS Katana amplifiers do not need quirks */
+ return 0;
}
if (attr == USB_ENDPOINT_SYNC_ASYNC &&
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
index a4217c1a5d01..2769360f195c 100644
--- a/tools/arch/arm/include/uapi/asm/kvm.h
+++ b/tools/arch/arm/include/uapi/asm/kvm.h
@@ -266,8 +266,10 @@ struct kvm_vcpu_events {
#define KVM_DEV_ARM_ITS_CTRL_RESET 4
/* KVM_IRQ_LINE irq field index values */
+#define KVM_ARM_IRQ_VCPU2_SHIFT 28
+#define KVM_ARM_IRQ_VCPU2_MASK 0xf
#define KVM_ARM_IRQ_TYPE_SHIFT 24
-#define KVM_ARM_IRQ_TYPE_MASK 0xff
+#define KVM_ARM_IRQ_TYPE_MASK 0xf
#define KVM_ARM_IRQ_VCPU_SHIFT 16
#define KVM_ARM_IRQ_VCPU_MASK 0xff
#define KVM_ARM_IRQ_NUM_SHIFT 0
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index 9a507716ae2f..67c21f9bdbad 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -325,8 +325,10 @@ struct kvm_vcpu_events {
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
/* KVM_IRQ_LINE irq field index values */
+#define KVM_ARM_IRQ_VCPU2_SHIFT 28
+#define KVM_ARM_IRQ_VCPU2_MASK 0xf
#define KVM_ARM_IRQ_TYPE_SHIFT 24
-#define KVM_ARM_IRQ_TYPE_MASK 0xff
+#define KVM_ARM_IRQ_TYPE_MASK 0xf
#define KVM_ARM_IRQ_VCPU_SHIFT 16
#define KVM_ARM_IRQ_VCPU_MASK 0xff
#define KVM_ARM_IRQ_NUM_SHIFT 0
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
index 47104e5b47fd..436ec7636927 100644
--- a/tools/arch/s390/include/uapi/asm/kvm.h
+++ b/tools/arch/s390/include/uapi/asm/kvm.h
@@ -231,6 +231,12 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_GSCB (1UL << 9)
#define KVM_SYNC_BPBC (1UL << 10)
#define KVM_SYNC_ETOKEN (1UL << 11)
+
+#define KVM_SYNC_S390_VALID_FIELDS \
+ (KVM_SYNC_PREFIX | KVM_SYNC_GPRS | KVM_SYNC_ACRS | KVM_SYNC_CRS | \
+ KVM_SYNC_ARCH0 | KVM_SYNC_PFAULT | KVM_SYNC_VRS | KVM_SYNC_RICCB | \
+ KVM_SYNC_FPRS | KVM_SYNC_GSCB | KVM_SYNC_BPBC | KVM_SYNC_ETOKEN)
+
/* length and alignment of the sdnx as a power of two */
#define SDNXC 8
#define SDNXL (1UL << SDNXC)
diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h
index f0b0c90dd398..f01950aa7fae 100644
--- a/tools/arch/x86/include/uapi/asm/vmx.h
+++ b/tools/arch/x86/include/uapi/asm/vmx.h
@@ -31,6 +31,7 @@
#define EXIT_REASON_EXCEPTION_NMI 0
#define EXIT_REASON_EXTERNAL_INTERRUPT 1
#define EXIT_REASON_TRIPLE_FAULT 2
+#define EXIT_REASON_INIT_SIGNAL 3
#define EXIT_REASON_PENDING_INTERRUPT 7
#define EXIT_REASON_NMI_WINDOW 8
@@ -90,6 +91,7 @@
{ EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
{ EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \
{ EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \
+ { EXIT_REASON_INIT_SIGNAL, "INIT_SIGNAL" }, \
{ EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \
{ EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \
{ EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \
diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile
index fbf5e4a0cb9c..5d1995fd369c 100644
--- a/tools/bpf/Makefile
+++ b/tools/bpf/Makefile
@@ -12,7 +12,11 @@ INSTALL ?= install
CFLAGS += -Wall -O2
CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/include/uapi -I$(srctree)/include
-ifeq ($(srctree),)
+# This will work when bpf is built in the tools environment, where srctree
+# isn't set, and when invoked from the selftests build, where srctree
+# is set to ".". building_out_of_srctree is undefined for in-srctree
+# builds.
+ifndef building_out_of_srctree
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
endif
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h
index 63b1f506ea67..c160a5354eb6 100644
--- a/tools/include/uapi/asm-generic/mman-common.h
+++ b/tools/include/uapi/asm-generic/mman-common.h
@@ -67,6 +67,9 @@
#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+#define MADV_COLD 20 /* deactivate these pages */
+#define MADV_PAGEOUT 21 /* reclaim these pages */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 328d05e77d9f..469dc512cca3 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -521,6 +521,7 @@ typedef struct drm_i915_irq_wait {
#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
#define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3)
+#define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4)
#define I915_PARAM_HUC_STATUS 42
diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h
index 2a616aa3f686..379a612f8f1d 100644
--- a/tools/include/uapi/linux/fs.h
+++ b/tools/include/uapi/linux/fs.h
@@ -13,6 +13,9 @@
#include <linux/limits.h>
#include <linux/ioctl.h>
#include <linux/types.h>
+#ifndef __KERNEL__
+#include <linux/fscrypt.h>
+#endif
/* Use of MS_* flags within the kernel is restricted to core mount(2) code. */
#if !defined(__KERNEL__)
@@ -213,57 +216,6 @@ struct fsxattr {
#define FS_IOC_SETFSLABEL _IOW(0x94, 50, char[FSLABEL_MAX])
/*
- * File system encryption support
- */
-/* Policy provided via an ioctl on the topmost directory */
-#define FS_KEY_DESCRIPTOR_SIZE 8
-
-#define FS_POLICY_FLAGS_PAD_4 0x00
-#define FS_POLICY_FLAGS_PAD_8 0x01
-#define FS_POLICY_FLAGS_PAD_16 0x02
-#define FS_POLICY_FLAGS_PAD_32 0x03
-#define FS_POLICY_FLAGS_PAD_MASK 0x03
-#define FS_POLICY_FLAG_DIRECT_KEY 0x04 /* use master key directly */
-#define FS_POLICY_FLAGS_VALID 0x07
-
-/* Encryption algorithms */
-#define FS_ENCRYPTION_MODE_INVALID 0
-#define FS_ENCRYPTION_MODE_AES_256_XTS 1
-#define FS_ENCRYPTION_MODE_AES_256_GCM 2
-#define FS_ENCRYPTION_MODE_AES_256_CBC 3
-#define FS_ENCRYPTION_MODE_AES_256_CTS 4
-#define FS_ENCRYPTION_MODE_AES_128_CBC 5
-#define FS_ENCRYPTION_MODE_AES_128_CTS 6
-#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */
-#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */
-#define FS_ENCRYPTION_MODE_ADIANTUM 9
-
-struct fscrypt_policy {
- __u8 version;
- __u8 contents_encryption_mode;
- __u8 filenames_encryption_mode;
- __u8 flags;
- __u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
-};
-
-#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy)
-#define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16])
-#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy)
-
-/* Parameters for passing an encryption key into the kernel keyring */
-#define FS_KEY_DESC_PREFIX "fscrypt:"
-#define FS_KEY_DESC_PREFIX_SIZE 8
-
-/* Structure that userspace passes to the kernel keyring */
-#define FS_MAX_KEY_SIZE 64
-
-struct fscrypt_key {
- __u32 mode;
- __u8 raw[FS_MAX_KEY_SIZE];
- __u32 size;
-};
-
-/*
* Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
*
* Note: for historical reasons, these flags were originally used and
@@ -306,6 +258,7 @@ struct fscrypt_key {
#define FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
#define FS_HUGE_FILE_FL 0x00040000 /* Reserved for ext4 */
#define FS_EXTENT_FL 0x00080000 /* Extents */
+#define FS_VERITY_FL 0x00100000 /* Verity protected inode */
#define FS_EA_INODE_FL 0x00200000 /* Inode used for large EA */
#define FS_EOFBLOCKS_FL 0x00400000 /* Reserved for ext4 */
#define FS_NOCOW_FL 0x00800000 /* Do not cow file */
diff --git a/tools/include/uapi/linux/fscrypt.h b/tools/include/uapi/linux/fscrypt.h
new file mode 100644
index 000000000000..39ccfe9311c3
--- /dev/null
+++ b/tools/include/uapi/linux/fscrypt.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * fscrypt user API
+ *
+ * These ioctls can be used on filesystems that support fscrypt. See the
+ * "User API" section of Documentation/filesystems/fscrypt.rst.
+ */
+#ifndef _UAPI_LINUX_FSCRYPT_H
+#define _UAPI_LINUX_FSCRYPT_H
+
+#include <linux/types.h>
+
+/* Encryption policy flags */
+#define FSCRYPT_POLICY_FLAGS_PAD_4 0x00
+#define FSCRYPT_POLICY_FLAGS_PAD_8 0x01
+#define FSCRYPT_POLICY_FLAGS_PAD_16 0x02
+#define FSCRYPT_POLICY_FLAGS_PAD_32 0x03
+#define FSCRYPT_POLICY_FLAGS_PAD_MASK 0x03
+#define FSCRYPT_POLICY_FLAG_DIRECT_KEY 0x04
+#define FSCRYPT_POLICY_FLAGS_VALID 0x07
+
+/* Encryption algorithms */
+#define FSCRYPT_MODE_AES_256_XTS 1
+#define FSCRYPT_MODE_AES_256_CTS 4
+#define FSCRYPT_MODE_AES_128_CBC 5
+#define FSCRYPT_MODE_AES_128_CTS 6
+#define FSCRYPT_MODE_ADIANTUM 9
+#define __FSCRYPT_MODE_MAX 9
+
+/*
+ * Legacy policy version; ad-hoc KDF and no key verification.
+ * For new encrypted directories, use fscrypt_policy_v2 instead.
+ *
+ * Careful: the .version field for this is actually 0, not 1.
+ */
+#define FSCRYPT_POLICY_V1 0
+#define FSCRYPT_KEY_DESCRIPTOR_SIZE 8
+struct fscrypt_policy_v1 {
+ __u8 version;
+ __u8 contents_encryption_mode;
+ __u8 filenames_encryption_mode;
+ __u8 flags;
+ __u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE];
+};
+#define fscrypt_policy fscrypt_policy_v1
+
+/*
+ * Process-subscribed "logon" key description prefix and payload format.
+ * Deprecated; prefer FS_IOC_ADD_ENCRYPTION_KEY instead.
+ */
+#define FSCRYPT_KEY_DESC_PREFIX "fscrypt:"
+#define FSCRYPT_KEY_DESC_PREFIX_SIZE 8
+#define FSCRYPT_MAX_KEY_SIZE 64
+struct fscrypt_key {
+ __u32 mode;
+ __u8 raw[FSCRYPT_MAX_KEY_SIZE];
+ __u32 size;
+};
+
+/*
+ * New policy version with HKDF and key verification (recommended).
+ */
+#define FSCRYPT_POLICY_V2 2
+#define FSCRYPT_KEY_IDENTIFIER_SIZE 16
+struct fscrypt_policy_v2 {
+ __u8 version;
+ __u8 contents_encryption_mode;
+ __u8 filenames_encryption_mode;
+ __u8 flags;
+ __u8 __reserved[4];
+ __u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE];
+};
+
+/* Struct passed to FS_IOC_GET_ENCRYPTION_POLICY_EX */
+struct fscrypt_get_policy_ex_arg {
+ __u64 policy_size; /* input/output */
+ union {
+ __u8 version;
+ struct fscrypt_policy_v1 v1;
+ struct fscrypt_policy_v2 v2;
+ } policy; /* output */
+};
+
+/*
+ * v1 policy keys are specified by an arbitrary 8-byte key "descriptor",
+ * matching fscrypt_policy_v1::master_key_descriptor.
+ */
+#define FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR 1
+
+/*
+ * v2 policy keys are specified by a 16-byte key "identifier" which the kernel
+ * calculates as a cryptographic hash of the key itself,
+ * matching fscrypt_policy_v2::master_key_identifier.
+ */
+#define FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER 2
+
+/*
+ * Specifies a key, either for v1 or v2 policies. This doesn't contain the
+ * actual key itself; this is just the "name" of the key.
+ */
+struct fscrypt_key_specifier {
+ __u32 type; /* one of FSCRYPT_KEY_SPEC_TYPE_* */
+ __u32 __reserved;
+ union {
+ __u8 __reserved[32]; /* reserve some extra space */
+ __u8 descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE];
+ __u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE];
+ } u;
+};
+
+/* Struct passed to FS_IOC_ADD_ENCRYPTION_KEY */
+struct fscrypt_add_key_arg {
+ struct fscrypt_key_specifier key_spec;
+ __u32 raw_size;
+ __u32 __reserved[9];
+ __u8 raw[];
+};
+
+/* Struct passed to FS_IOC_REMOVE_ENCRYPTION_KEY */
+struct fscrypt_remove_key_arg {
+ struct fscrypt_key_specifier key_spec;
+#define FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY 0x00000001
+#define FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS 0x00000002
+ __u32 removal_status_flags; /* output */
+ __u32 __reserved[5];
+};
+
+/* Struct passed to FS_IOC_GET_ENCRYPTION_KEY_STATUS */
+struct fscrypt_get_key_status_arg {
+ /* input */
+ struct fscrypt_key_specifier key_spec;
+ __u32 __reserved[6];
+
+ /* output */
+#define FSCRYPT_KEY_STATUS_ABSENT 1
+#define FSCRYPT_KEY_STATUS_PRESENT 2
+#define FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED 3
+ __u32 status;
+#define FSCRYPT_KEY_STATUS_FLAG_ADDED_BY_SELF 0x00000001
+ __u32 status_flags;
+ __u32 user_count;
+ __u32 __out_reserved[13];
+};
+
+#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy)
+#define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16])
+#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy)
+#define FS_IOC_GET_ENCRYPTION_POLICY_EX _IOWR('f', 22, __u8[9]) /* size + version */
+#define FS_IOC_ADD_ENCRYPTION_KEY _IOWR('f', 23, struct fscrypt_add_key_arg)
+#define FS_IOC_REMOVE_ENCRYPTION_KEY _IOWR('f', 24, struct fscrypt_remove_key_arg)
+#define FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS _IOWR('f', 25, struct fscrypt_remove_key_arg)
+#define FS_IOC_GET_ENCRYPTION_KEY_STATUS _IOWR('f', 26, struct fscrypt_get_key_status_arg)
+
+/**********************************************************************/
+
+/* old names; don't add anything new here! */
+#ifndef __KERNEL__
+#define FS_KEY_DESCRIPTOR_SIZE FSCRYPT_KEY_DESCRIPTOR_SIZE
+#define FS_POLICY_FLAGS_PAD_4 FSCRYPT_POLICY_FLAGS_PAD_4
+#define FS_POLICY_FLAGS_PAD_8 FSCRYPT_POLICY_FLAGS_PAD_8
+#define FS_POLICY_FLAGS_PAD_16 FSCRYPT_POLICY_FLAGS_PAD_16
+#define FS_POLICY_FLAGS_PAD_32 FSCRYPT_POLICY_FLAGS_PAD_32
+#define FS_POLICY_FLAGS_PAD_MASK FSCRYPT_POLICY_FLAGS_PAD_MASK
+#define FS_POLICY_FLAG_DIRECT_KEY FSCRYPT_POLICY_FLAG_DIRECT_KEY
+#define FS_POLICY_FLAGS_VALID FSCRYPT_POLICY_FLAGS_VALID
+#define FS_ENCRYPTION_MODE_INVALID 0 /* never used */
+#define FS_ENCRYPTION_MODE_AES_256_XTS FSCRYPT_MODE_AES_256_XTS
+#define FS_ENCRYPTION_MODE_AES_256_GCM 2 /* never used */
+#define FS_ENCRYPTION_MODE_AES_256_CBC 3 /* never used */
+#define FS_ENCRYPTION_MODE_AES_256_CTS FSCRYPT_MODE_AES_256_CTS
+#define FS_ENCRYPTION_MODE_AES_128_CBC FSCRYPT_MODE_AES_128_CBC
+#define FS_ENCRYPTION_MODE_AES_128_CTS FSCRYPT_MODE_AES_128_CTS
+#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* removed */
+#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* removed */
+#define FS_ENCRYPTION_MODE_ADIANTUM FSCRYPT_MODE_ADIANTUM
+#define FS_KEY_DESC_PREFIX FSCRYPT_KEY_DESC_PREFIX
+#define FS_KEY_DESC_PREFIX_SIZE FSCRYPT_KEY_DESC_PREFIX_SIZE
+#define FS_MAX_KEY_SIZE FSCRYPT_MAX_KEY_SIZE
+#endif /* !__KERNEL__ */
+
+#endif /* _UAPI_LINUX_FSCRYPT_H */
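For context, here is a minimal sketch of feeding a key to the new
FS_IOC_ADD_ENCRYPTION_KEY ioctl. The helper name add_fscrypt_key is
hypothetical; mnt_fd is assumed to be an open fd on the filesystem's root
directory, and key_len must not exceed FSCRYPT_MAX_KEY_SIZE:

	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/fscrypt.h>

	static int add_fscrypt_key(int mnt_fd, const __u8 *key, __u32 key_len,
				   __u8 id_out[FSCRYPT_KEY_IDENTIFIER_SIZE])
	{
		struct fscrypt_add_key_arg *arg;
		int ret;

		/* raw[] is a flexible array member, so size the buffer for it */
		arg = calloc(1, sizeof(*arg) + key_len);
		if (!arg)
			return -1;
		arg->key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
		arg->raw_size = key_len;
		memcpy(arg->raw, key, key_len);
		ret = ioctl(mnt_fd, FS_IOC_ADD_ENCRYPTION_KEY, arg);
		if (ret == 0)	/* the kernel computes and returns the identifier */
			memcpy(id_out, arg->key_spec.u.identifier,
			       FSCRYPT_KEY_IDENTIFIER_SIZE);
		free(arg);
		return ret;
	}

The returned identifier is what a subsequent FS_IOC_SET_ENCRYPTION_POLICY
call places in fscrypt_policy_v2::master_key_identifier.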
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 5e3f12d5359e..233efbb1c81c 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -243,6 +243,8 @@ struct kvm_hyperv_exit {
#define KVM_INTERNAL_ERROR_SIMUL_EX 2
/* Encounter unexpected vm-exit due to delivery event. */
#define KVM_INTERNAL_ERROR_DELIVERY_EV 3
+/* Encounter unexpected vm-exit reason */
+#define KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON 4
/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
struct kvm_run {
@@ -996,6 +998,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_ARM_PTRAUTH_ADDRESS 171
#define KVM_CAP_ARM_PTRAUTH_GENERIC 172
#define KVM_CAP_PMU_EVENT_FILTER 173
+#define KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 174
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/tools/include/uapi/linux/usbdevice_fs.h b/tools/include/uapi/linux/usbdevice_fs.h
index 78efe870c2b7..cf525cddeb94 100644
--- a/tools/include/uapi/linux/usbdevice_fs.h
+++ b/tools/include/uapi/linux/usbdevice_fs.h
@@ -158,6 +158,7 @@ struct usbdevfs_hub_portinfo {
#define USBDEVFS_CAP_MMAP 0x20
#define USBDEVFS_CAP_DROP_PRIVILEGES 0x40
#define USBDEVFS_CAP_CONNINFO_EX 0x80
+#define USBDEVFS_CAP_SUSPEND 0x100
/* USBDEVFS_DISCONNECT_CLAIM flags & struct */
@@ -223,5 +224,8 @@ struct usbdevfs_streams {
* extending size of the data returned.
*/
#define USBDEVFS_CONNINFO_EX(len) _IOC(_IOC_READ, 'U', 32, len)
+#define USBDEVFS_FORBID_SUSPEND _IO('U', 33)
+#define USBDEVFS_ALLOW_SUSPEND _IO('U', 34)
+#define USBDEVFS_WAIT_FOR_RESUME _IO('U', 35)
#endif /* _UAPI_LINUX_USBDEVICE_FS_H */
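A minimal sketch of the new suspend ioctls, assuming usbfs_fd was opened on
the device's /dev/bus/usb/BBB/DDD node and the uapi header above is
available; the helper name is hypothetical, and all three ioctls take no
argument:

	#include <sys/ioctl.h>
	#include <linux/usbdevice_fs.h>

	static int with_suspend_forbidden(int usbfs_fd)
	{
		/* block runtime suspend while we talk to the device */
		if (ioctl(usbfs_fd, USBDEVFS_FORBID_SUSPEND) != 0)
			return -1;
		/* ... submit URBs here ... */
		/* drop our veto so the device may suspend again */
		return ioctl(usbfs_fd, USBDEVFS_ALLOW_SUSPEND);
	}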
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index c6f94cffe06e..56ce6292071b 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -8,7 +8,11 @@ LIBBPF_MAJOR_VERSION := $(firstword $(subst ., ,$(LIBBPF_VERSION)))
MAKEFLAGS += --no-print-directory
-ifeq ($(srctree),)
+# This will work when bpf is built in the tools env, where srctree
+# isn't set, and when invoked from the selftests build, where srctree
+# is set to ".". building_out_of_srctree is undefined for in-srctree
+# builds.
+ifndef building_out_of_srctree
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
@@ -110,6 +114,9 @@ override CFLAGS += $(INCLUDES)
override CFLAGS += -fvisibility=hidden
override CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
+# flags specific for shared library
+SHLIB_FLAGS := -DSHARED
+
ifeq ($(VERBOSE),1)
Q =
else
@@ -126,14 +133,17 @@ all:
export srctree OUTPUT CC LD CFLAGS V
include $(srctree)/tools/build/Makefile.include
-BPF_IN := $(OUTPUT)libbpf-in.o
+SHARED_OBJDIR := $(OUTPUT)sharedobjs/
+STATIC_OBJDIR := $(OUTPUT)staticobjs/
+BPF_IN_SHARED := $(SHARED_OBJDIR)libbpf-in.o
+BPF_IN_STATIC := $(STATIC_OBJDIR)libbpf-in.o
VERSION_SCRIPT := libbpf.map
LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET))
LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
PC_FILE := $(addprefix $(OUTPUT),$(PC_FILE))
-GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \
+GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \
cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \
awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}' | \
sort -u | wc -l)
@@ -155,7 +165,7 @@ all: fixdep
all_cmd: $(CMD_TARGETS) check
-$(BPF_IN): force elfdep bpfdep
+$(BPF_IN_SHARED): force elfdep bpfdep
@(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \
(diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true
@@ -171,17 +181,20 @@ $(BPF_IN): force elfdep bpfdep
@(test -f ../../include/uapi/linux/if_xdp.h -a -f ../../../include/uapi/linux/if_xdp.h && ( \
(diff -B ../../include/uapi/linux/if_xdp.h ../../../include/uapi/linux/if_xdp.h >/dev/null) || \
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
- $(Q)$(MAKE) $(build)=libbpf
+ $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(SHARED_OBJDIR) CFLAGS="$(CFLAGS) $(SHLIB_FLAGS)"
+
+$(BPF_IN_STATIC): force elfdep bpfdep
+ $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR)
$(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
-$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN)
+$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN_SHARED)
$(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \
-Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@
@ln -sf $(@F) $(OUTPUT)libbpf.so
@ln -sf $(@F) $(OUTPUT)libbpf.so.$(LIBBPF_MAJOR_VERSION)
-$(OUTPUT)libbpf.a: $(BPF_IN)
+$(OUTPUT)libbpf.a: $(BPF_IN_STATIC)
$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
$(OUTPUT)test_libbpf: test_libbpf.cpp $(OUTPUT)libbpf.a
@@ -197,7 +210,7 @@ check: check_abi
check_abi: $(OUTPUT)libbpf.so
@if [ "$(GLOBAL_SYM_COUNT)" != "$(VERSIONED_SYM_COUNT)" ]; then \
- echo "Warning: Num of global symbols in $(BPF_IN)" \
+ echo "Warning: Num of global symbols in $(BPF_IN_SHARED)" \
"($(GLOBAL_SYM_COUNT)) does NOT match with num of" \
"versioned symbols in $^ ($(VERSIONED_SYM_COUNT))." \
"Please make sure all LIBBPF_API symbols are" \
@@ -255,9 +268,9 @@ config-clean:
$(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
clean:
- $(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \
+ $(call QUIET_CLEAN, libbpf) $(RM) -rf $(TARGETS) $(CXX_TEST_TARGET) \
*.o *~ *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) .*.d .*.cmd \
- *.pc LIBBPF-CFLAGS
+ *.pc LIBBPF-CFLAGS $(SHARED_OBJDIR) $(STATIC_OBJDIR)
$(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 2e83a34f8c79..98216a69c32f 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -34,6 +34,22 @@
(offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD))
#endif
+/* Symbol versioning differs between static and shared libraries.
+ * Properly versioned symbols are needed for a shared library, but
+ * only the symbol of the newest version is needed for a static library.
+ */
+#ifdef SHARED
+# define COMPAT_VERSION(internal_name, api_name, version) \
+ asm(".symver " #internal_name "," #api_name "@" #version);
+# define DEFAULT_VERSION(internal_name, api_name, version) \
+ asm(".symver " #internal_name "," #api_name "@@" #version);
+#else
+# define COMPAT_VERSION(internal_name, api_name, version)
+# define DEFAULT_VERSION(internal_name, api_name, version) \
+ extern typeof(internal_name) api_name \
+ __attribute__((alias(#internal_name)));
+#endif
+
extern void libbpf_print(enum libbpf_print_level level,
const char *format, ...)
__attribute__((format(printf, 2, 3)));
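A standalone sketch of what the static branch of DEFAULT_VERSION() boils
down to; the names are illustrative, and the shared branch instead emits a
.symver directive (see the xsk.c hunk below):

	#include <stdio.h>

	/* internal, version-suffixed implementation */
	static int do_sum_v0_0_4(int a, int b) { return a + b; }

	/* the alias publishes the unversioned name for static linking */
	extern typeof(do_sum_v0_0_4) do_sum
		__attribute__((alias("do_sum_v0_0_4")));

	int main(void)
	{
		printf("%d\n", do_sum(2, 3));	/* prints 5 */
		return 0;
	}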
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index 24fa313524fb..a902838f9fcc 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -261,8 +261,8 @@ int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp,
&config);
}
-asm(".symver xsk_umem__create_v0_0_2, xsk_umem__create@LIBBPF_0.0.2");
-asm(".symver xsk_umem__create_v0_0_4, xsk_umem__create@@LIBBPF_0.0.4");
+COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2)
+DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4)
static int xsk_load_xdp_prog(struct xsk_socket *xsk)
{
diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile
index ed61fb3a46c0..5b2cd5e58df0 100644
--- a/tools/lib/subcmd/Makefile
+++ b/tools/lib/subcmd/Makefile
@@ -20,7 +20,13 @@ MAKEFLAGS += --no-print-directory
LIBFILE = $(OUTPUT)libsubcmd.a
CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
-CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
+CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -fPIC
+
+ifeq ($(DEBUG),0)
+ ifeq ($(feature-fortify-source), 1)
+ CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2
+ endif
+endif
ifeq ($(CC_NO_CLANG), 0)
CFLAGS += -O3
diff --git a/tools/perf/Documentation/asciidoc.conf b/tools/perf/Documentation/asciidoc.conf
index 356b23a40339..2b62ba1e72b7 100644
--- a/tools/perf/Documentation/asciidoc.conf
+++ b/tools/perf/Documentation/asciidoc.conf
@@ -71,6 +71,9 @@ ifdef::backend-docbook[]
[header]
template::[header-declarations]
<refentry>
+ifdef::perf_date[]
+<refentryinfo><date>{perf_date}</date></refentryinfo>
+endif::perf_date[]
<refmeta>
<refentrytitle>{mantitle}</refentrytitle>
<manvolnum>{manvolnum}</manvolnum>
diff --git a/tools/perf/Documentation/jitdump-specification.txt b/tools/perf/Documentation/jitdump-specification.txt
index 4c62b0713651..52152d156ad9 100644
--- a/tools/perf/Documentation/jitdump-specification.txt
+++ b/tools/perf/Documentation/jitdump-specification.txt
@@ -36,8 +36,8 @@ III/ Jitdump file header format
Each jitdump file starts with a fixed size header containing the following fields in order:
-* uint32_t magic : a magic number tagging the file type. The value is 4-byte long and represents the string "JiTD" in ASCII form. It is 0x4A695444 or 0x4454694a depending on the endianness. The field can be used to detect the endianness of the file
-* uint32_t version : a 4-byte value representing the format version. It is currently set to 2
+* uint32_t magic : a magic number tagging the file type. The value is 4 bytes long and represents the string "JiTD" in ASCII form. It is written as 0x4A695444. The reader will detect an endian mismatch when it reads 0x4454694a.
+* uint32_t version : a 4-byte value representing the format version. It is currently set to 1
* uint32_t total_size: size in bytes of file header
* uint32_t elf_mach : ELF architecture encoding (ELF e_machine value as specified in /usr/include/elf.h)
* uint32_t pad1 : padding. Reserved for future use
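Read as C, the fields quoted in this hunk amount to the following sketch
(the struct name is hypothetical; the specification goes on to define
further fields after pad1):

	#include <stdint.h>

	struct jitdump_file_header {
		uint32_t magic;		/* "JiTD", written as 0x4A695444 */
		uint32_t version;	/* currently 1 */
		uint32_t total_size;	/* size in bytes of file header */
		uint32_t elf_mach;	/* ELF e_machine value */
		uint32_t pad1;		/* reserved for future use */
		/* ... remaining fields per the full specification ... */
	};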
diff --git a/tools/perf/arch/arm/annotate/instructions.c b/tools/perf/arch/arm/annotate/instructions.c
index e1d4b484cc4b..2ff6cedeb9c5 100644
--- a/tools/perf/arch/arm/annotate/instructions.c
+++ b/tools/perf/arch/arm/annotate/instructions.c
@@ -37,7 +37,7 @@ static int arm__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
arm = zalloc(sizeof(*arm));
if (!arm)
- return -1;
+ return ENOMEM;
#define ARM_CONDS "(cc|cs|eq|ge|gt|hi|le|ls|lt|mi|ne|pl|vc|vs)"
err = regcomp(&arm->call_insn, "^blx?" ARM_CONDS "?$", REG_EXTENDED);
@@ -59,5 +59,5 @@ out_free_call:
regfree(&arm->call_insn);
out_free_arm:
free(arm);
- return -1;
+ return SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP;
}
diff --git a/tools/perf/arch/arm64/annotate/instructions.c b/tools/perf/arch/arm64/annotate/instructions.c
index 43aa93ed8414..037e292ecd8e 100644
--- a/tools/perf/arch/arm64/annotate/instructions.c
+++ b/tools/perf/arch/arm64/annotate/instructions.c
@@ -95,7 +95,7 @@ static int arm64__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
arm = zalloc(sizeof(*arm));
if (!arm)
- return -1;
+ return ENOMEM;
/* bl, blr */
err = regcomp(&arm->call_insn, "^blr?$", REG_EXTENDED);
@@ -118,5 +118,5 @@ out_free_call:
regfree(&arm->call_insn);
out_free_arm:
free(arm);
- return -1;
+ return SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP;
}
diff --git a/tools/perf/arch/powerpc/util/header.c b/tools/perf/arch/powerpc/util/header.c
index b6b7bc7e31a1..3b4cdfc5efd6 100644
--- a/tools/perf/arch/powerpc/util/header.c
+++ b/tools/perf/arch/powerpc/util/header.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <sys/types.h>
+#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
@@ -30,7 +31,7 @@ get_cpuid(char *buffer, size_t sz)
buffer[nb-1] = '\0';
return 0;
}
- return -1;
+ return ENOBUFS;
}
char *
diff --git a/tools/perf/arch/s390/annotate/instructions.c b/tools/perf/arch/s390/annotate/instructions.c
index 89bb8f2c54ce..a50e70baf918 100644
--- a/tools/perf/arch/s390/annotate/instructions.c
+++ b/tools/perf/arch/s390/annotate/instructions.c
@@ -164,8 +164,10 @@ static int s390__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
if (!arch->initialized) {
arch->initialized = true;
arch->associate_instruction_ops = s390__associate_ins_ops;
- if (cpuid)
- err = s390__cpuid_parse(arch, cpuid);
+ if (cpuid) {
+ if (s390__cpuid_parse(arch, cpuid))
+ err = SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING;
+ }
}
return err;
diff --git a/tools/perf/arch/s390/util/header.c b/tools/perf/arch/s390/util/header.c
index 8b0b018d896a..7933f6871c81 100644
--- a/tools/perf/arch/s390/util/header.c
+++ b/tools/perf/arch/s390/util/header.c
@@ -8,6 +8,7 @@
*/
#include <sys/types.h>
+#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
@@ -54,7 +55,7 @@ int get_cpuid(char *buffer, size_t sz)
sysinfo = fopen(SYSINFO, "r");
if (sysinfo == NULL)
- return -1;
+ return errno;
while ((read = getline(&line, &line_sz, sysinfo)) != -1) {
if (!strncmp(line, SYSINFO_MANU, strlen(SYSINFO_MANU))) {
@@ -89,7 +90,7 @@ int get_cpuid(char *buffer, size_t sz)
/* Missing manufacturer, type or model information should not happen */
if (!manufacturer[0] || !type[0] || !model[0])
- return -1;
+ return EINVAL;
/*
* Scan /proc/service_levels and return the CPU-MF counter facility
@@ -133,14 +134,14 @@ skip_sysinfo:
else
nbytes = snprintf(buffer, sz, "%s,%s,%s", manufacturer, type,
model);
- return (nbytes >= sz) ? -1 : 0;
+ return (nbytes >= sz) ? ENOBUFS : 0;
}
char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
char *buf = malloc(128);
- if (buf && get_cpuid(buf, 128) < 0)
+ if (buf && get_cpuid(buf, 128))
zfree(&buf);
return buf;
}
diff --git a/tools/perf/arch/x86/annotate/instructions.c b/tools/perf/arch/x86/annotate/instructions.c
index 44f5aba78210..7eb5621c021d 100644
--- a/tools/perf/arch/x86/annotate/instructions.c
+++ b/tools/perf/arch/x86/annotate/instructions.c
@@ -196,8 +196,10 @@ static int x86__annotate_init(struct arch *arch, char *cpuid)
if (arch->initialized)
return 0;
- if (cpuid)
- err = x86__cpuid_parse(arch, cpuid);
+ if (cpuid) {
+ if (x86__cpuid_parse(arch, cpuid))
+ err = SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING;
+ }
arch->initialized = true;
return err;
diff --git a/tools/perf/arch/x86/util/header.c b/tools/perf/arch/x86/util/header.c
index 662ecf84a421..aa6deb463bf3 100644
--- a/tools/perf/arch/x86/util/header.c
+++ b/tools/perf/arch/x86/util/header.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <sys/types.h>
+#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
@@ -58,7 +59,7 @@ __get_cpuid(char *buffer, size_t sz, const char *fmt)
buffer[nb-1] = '\0';
return 0;
}
- return -1;
+ return ENOBUFS;
}
int
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 2227e2f42c09..58a9e0989491 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -705,14 +705,15 @@ static int process_sample_event(struct perf_tool *tool,
static int cpu_isa_config(struct perf_kvm_stat *kvm)
{
- char buf[64], *cpuid;
+ char buf[128], *cpuid;
int err;
if (kvm->live) {
err = get_cpuid(buf, sizeof(buf));
if (err != 0) {
- pr_err("Failed to look up CPU type\n");
- return err;
+ pr_err("Failed to look up CPU type: %s\n",
+ str_error_r(err, buf, sizeof(buf)));
+ return -err;
}
cpuid = buf;
} else
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 286fc70d7402..67be8d31afab 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -1063,7 +1063,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
continue;
insn = 0;
- for (off = 0;; off += ilen) {
+ for (off = 0; off < (unsigned)len; off += ilen) {
uint64_t ip = start + off;
printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
@@ -1074,6 +1074,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
printed += print_srccode(thread, x.cpumode, ip);
break;
} else {
+ ilen = 0;
printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", ip,
dump_insn(&x, ip, buffer + off, len - off, &ilen));
if (ilen == 0)
@@ -1083,6 +1084,8 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
insn++;
}
}
+ if (off != (unsigned)len)
+ printed += fprintf(fp, "\tmismatch of LBR data and executable\n");
}
/*
@@ -1123,6 +1126,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
goto out;
}
for (off = 0; off <= end - start; off += ilen) {
+ ilen = 0;
printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", start + off,
dump_insn(&x, start + off, buffer + off, len - off, &ilen));
if (ilen == 0)
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index e2e0f06c97d0..cea13cb987d0 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -8,6 +8,7 @@ include/uapi/drm/i915_drm.h
include/uapi/linux/fadvise.h
include/uapi/linux/fcntl.h
include/uapi/linux/fs.h
+include/uapi/linux/fscrypt.h
include/uapi/linux/kcmp.h
include/uapi/linux/kvm.h
include/uapi/linux/in.h
diff --git a/tools/perf/pmu-events/arch/s390/cf_m8561/basic.json b/tools/perf/pmu-events/arch/s390/cf_z15/basic.json
index 17fb5241928b..17fb5241928b 100644
--- a/tools/perf/pmu-events/arch/s390/cf_m8561/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z15/basic.json
diff --git a/tools/perf/pmu-events/arch/s390/cf_m8561/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json
index db286f19e7b6..db286f19e7b6 100644
--- a/tools/perf/pmu-events/arch/s390/cf_m8561/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json
diff --git a/tools/perf/pmu-events/arch/s390/cf_m8561/crypto6.json b/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json
index 5e36bc2468d0..5e36bc2468d0 100644
--- a/tools/perf/pmu-events/arch/s390/cf_m8561/crypto6.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json
diff --git a/tools/perf/pmu-events/arch/s390/cf_m8561/extended.json b/tools/perf/pmu-events/arch/s390/cf_z15/extended.json
index 89e070727e1b..89e070727e1b 100644
--- a/tools/perf/pmu-events/arch/s390/cf_m8561/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z15/extended.json
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z15/transaction.json
new file mode 100644
index 000000000000..1a0034f79f73
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z15/transaction.json
@@ -0,0 +1,7 @@
+[
+ {
+ "BriefDescription": "Transaction count",
+ "MetricName": "transaction",
+ "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/s390/mapfile.csv b/tools/perf/pmu-events/arch/s390/mapfile.csv
index bd3fc577139c..61641a3480e0 100644
--- a/tools/perf/pmu-events/arch/s390/mapfile.csv
+++ b/tools/perf/pmu-events/arch/s390/mapfile.csv
@@ -4,4 +4,4 @@ Family-model,Version,Filename,EventType
^IBM.282[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_zec12,core
^IBM.296[45].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z13,core
^IBM.390[67].*[13]\.[1-5].[[:xdigit:]]+$,3,cf_z14,core
-^IBM.856[12].*3\.6.[[:xdigit:]]+$,3,cf_m8561,core
+^IBM.856[12].*3\.6.[[:xdigit:]]+$,3,cf_z15,core
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index 9e37287da924..e2837260ca4d 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -450,12 +450,12 @@ static struct fixed {
const char *name;
const char *event;
} fixed[] = {
- { "inst_retired.any", "event=0xc0" },
- { "inst_retired.any_p", "event=0xc0" },
- { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
- { "cpu_clk_unhalted.thread", "event=0x3c" },
- { "cpu_clk_unhalted.core", "event=0x3c" },
- { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
+ { "inst_retired.any", "event=0xc0,period=2000003" },
+ { "inst_retired.any_p", "event=0xc0,period=2000003" },
+ { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03,period=2000003" },
+ { "cpu_clk_unhalted.thread", "event=0x3c,period=2000003" },
+ { "cpu_clk_unhalted.core", "event=0x3c,period=2000003" },
+ { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1,period=2000003" },
{ NULL, NULL},
};
diff --git a/tools/perf/tests/perf-hooks.c b/tools/perf/tests/perf-hooks.c
index dbc27199c65e..dd865e0bea12 100644
--- a/tools/perf/tests/perf-hooks.c
+++ b/tools/perf/tests/perf-hooks.c
@@ -19,12 +19,11 @@ static void sigsegv_handler(int sig __maybe_unused)
static void the_hook(void *_hook_flags)
{
int *hook_flags = _hook_flags;
- int *p = NULL;
*hook_flags = 1234;
/* Generate a segfault, test perf_hooks__recover */
- *p = 0;
+ raise(SIGSEGV);
}
int test__perf_hooks(struct test *test __maybe_unused, int subtest __maybe_unused)
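A self-contained sketch of the recover-from-SIGSEGV pattern this test
exercises; siglongjmp() stands in here for the role perf_hooks__recover()
plays in the real test:

	#include <signal.h>
	#include <setjmp.h>
	#include <stdio.h>

	static sigjmp_buf env;

	static void handler(int sig)
	{
		(void)sig;
		siglongjmp(env, 1);	/* jump back instead of crashing */
	}

	int main(void)
	{
		signal(SIGSEGV, handler);
		if (sigsetjmp(env, 1) == 0)
			raise(SIGSEGV);	/* deterministic, unlike a NULL store */
		puts("recovered");
		return 0;
	}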
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index e830eadfca2a..4036c7f7b0fb 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1631,6 +1631,19 @@ int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *
case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF:
scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation");
break;
+ case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP:
+ scnprintf(buf, buflen, "Problems with arch specific instruction name regular expressions.");
+ break;
+ case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING:
+ scnprintf(buf, buflen, "Problems while parsing the CPUID in the arch specific initialization.");
+ break;
+ case SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE:
+ scnprintf(buf, buflen, "Invalid BPF file: %s.", dso->long_name);
+ break;
+ case SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF:
+ scnprintf(buf, buflen, "The %s BPF file has no BTF section, compile with -g or use pahole -J.",
+ dso->long_name);
+ break;
default:
scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
break;
@@ -1662,7 +1675,7 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil
build_id_path = strdup(filename);
if (!build_id_path)
- return -1;
+ return ENOMEM;
/*
* old style build-id cache has name of XX/XXXXXXX.. while
@@ -1713,13 +1726,13 @@ static int symbol__disassemble_bpf(struct symbol *sym,
char tpath[PATH_MAX];
size_t buf_size;
int nr_skip = 0;
- int ret = -1;
char *buf;
bfd *bfdf;
+ int ret;
FILE *s;
if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO)
- return -1;
+ return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE;
pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__,
sym->name, sym->start, sym->end - sym->start);
@@ -1732,8 +1745,10 @@ static int symbol__disassemble_bpf(struct symbol *sym,
assert(bfd_check_format(bfdf, bfd_object));
s = open_memstream(&buf, &buf_size);
- if (!s)
+ if (!s) {
+ ret = errno;
goto out;
+ }
init_disassemble_info(&info, s,
(fprintf_ftype) fprintf);
@@ -1742,8 +1757,10 @@ static int symbol__disassemble_bpf(struct symbol *sym,
info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
dso->bpf_prog.id);
- if (!info_node)
+ if (!info_node) {
+ ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
goto out;
+ }
info_linear = info_node->info_linear;
sub_id = dso->bpf_prog.sub_id;
@@ -2071,11 +2088,11 @@ int symbol__annotate(struct symbol *sym, struct map *map,
int err;
if (!arch_name)
- return -1;
+ return errno;
args.arch = arch = arch__find(arch_name);
if (arch == NULL)
- return -ENOTSUP;
+ return ENOTSUP;
if (parch)
*parch = arch;
@@ -2971,7 +2988,7 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct evsel *evsel,
notes->offsets = zalloc(size * sizeof(struct annotation_line *));
if (notes->offsets == NULL)
- return -1;
+ return ENOMEM;
if (perf_evsel__is_group_event(evsel))
nr_pcnt = evsel->core.nr_members;
@@ -2997,7 +3014,7 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct evsel *evsel,
out_free_offsets:
zfree(&notes->offsets);
- return -1;
+ return err;
}
#define ANNOTATION__CFG(n) \
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index d94be9140e31..d76fd0e81f46 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -370,6 +370,10 @@ enum symbol_disassemble_errno {
SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX = __SYMBOL_ANNOTATE_ERRNO__START,
SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF,
+ SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING,
+ SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP,
+ SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE,
+ SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF,
__SYMBOL_ANNOTATE_ERRNO__END,
};
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 5591af81a070..abc7fda4a0fe 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -30,6 +30,7 @@
#include "counts.h"
#include "event.h"
#include "evsel.h"
+#include "util/env.h"
#include "util/evsel_config.h"
#include "util/evsel_fprintf.h"
#include "evlist.h"
@@ -2512,7 +2513,7 @@ struct perf_env *perf_evsel__env(struct evsel *evsel)
{
if (evsel && evsel->evlist)
return evsel->evlist->env;
- return NULL;
+ return &perf_env;
}
static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index 1bdf4c6ea3e5..e3ccb0ce1938 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -395,7 +395,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
size_t size;
u16 idr_size;
const char *sym;
- uint32_t count;
+ uint64_t count;
int ret, csize, usize;
pid_t pid, tid;
struct {
@@ -418,7 +418,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
return -1;
filename = event->mmap2.filename;
- size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%u.so",
+ size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
jd->dir,
pid,
count);
@@ -529,7 +529,7 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
return -1;
filename = event->mmap2.filename;
- size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%"PRIu64,
+ size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
jd->dir,
pid,
jr->move.code_index);
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index 8d04e3d070b1..8b14e4a7f1dc 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -233,14 +233,14 @@ static int detect_kbuild_dir(char **kbuild_dir)
const char *prefix_dir = "";
const char *suffix_dir = "";
+ /* _UTSNAME_LENGTH is 65 */
+ char release[128];
+
char *autoconf_path;
int err;
if (!test_dir) {
- /* _UTSNAME_LENGTH is 65 */
- char release[128];
-
err = fetch_kernel_version(NULL, release,
sizeof(release));
if (err)
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 5b83ed1ebbd6..eec9b282c047 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include "symbol.h"
+#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
@@ -850,6 +851,8 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
}
after->start = map->end;
+ after->pgoff += map->end - pos->start;
+ assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end));
__map_groups__insert(pos->groups, after);
if (verbose >= 2 && !use_browser)
map__fprintf(after, fp);
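A worked example of the pgoff fixup, with illustrative addresses:

	/* pos spans [0x1000, 0x5000) with pgoff 0x0 and the incoming map
	 * ends at 0x3000.  The surviving tail "after" starts at 0x3000, so
	 * its pgoff must advance by map->end - pos->start = 0x2000; only
	 * then does after->map_ip(after, 0x3000) == 0x2000 match
	 * pos->map_ip(pos, 0x3000), which is what the new assert checks.
	 */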
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 53f31053a27a..02460362256d 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -14,6 +14,7 @@
#include "thread_map.h"
#include "trace-event.h"
#include "mmap.h"
+#include "util/env.h"
#include <internal/lib.h>
#include "../perf-sys.h"
@@ -54,6 +55,11 @@ int parse_callchain_record(const char *arg __maybe_unused,
}
/*
+ * Add this one here so as not to drag in util/env.c
+ */
+struct perf_env perf_env;
+
+/*
* Support debug printing even though util/debug.c is not linked. That means
* implementing 'verbose' and 'eprintf'.
*/
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index c3feccb99ff5..4cdbae6f4e61 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -63,6 +63,13 @@ TARGETS += zram
TARGETS_HOTPLUG = cpu-hotplug
TARGETS_HOTPLUG += memory-hotplug
+# User can optionally provide a TARGETS skiplist.
+SKIP_TARGETS ?=
+ifneq ($(SKIP_TARGETS),)
+ TMP := $(filter-out $(SKIP_TARGETS), $(TARGETS))
+ override TARGETS := $(TMP)
+endif
+
# Clear LDFLAGS and MAKEFLAGS if called from main
# Makefile to avoid test build failures when test
# Makefile doesn't have explicit build rules.
@@ -171,9 +178,12 @@ run_pstore_crash:
# 1. output_dir=kernel_src
# 2. a separate output directory is specified using O= KBUILD_OUTPUT
# 3. a separate output directory is specified using KBUILD_OUTPUT
+# Avoid conflict with INSTALL_PATH set by the main Makefile
#
-INSTALL_PATH ?= $(BUILD)/install
-INSTALL_PATH := $(abspath $(INSTALL_PATH))
+KSFT_INSTALL_PATH ?= $(BUILD)/kselftest_install
+KSFT_INSTALL_PATH := $(abspath $(KSFT_INSTALL_PATH))
+# Avoid changing the rest of the logic here and lib.mk.
+INSTALL_PATH := $(KSFT_INSTALL_PATH)
ALL_SCRIPT := $(INSTALL_PATH)/run_kselftest.sh
install: all
@@ -198,11 +208,16 @@ ifdef INSTALL_PATH
echo " cat /dev/null > \$$logfile" >> $(ALL_SCRIPT)
echo "fi" >> $(ALL_SCRIPT)
+ @# While building run_kselftest.sh, also skip non-existent TARGET dirs:
+ @# they could be the result of a build failure and should NOT be
+ @# included in the generated runlist.
for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
+ [ ! -d $$INSTALL_PATH/$$TARGET ] && echo "Skipping non-existent dir: $$TARGET" && continue; \
echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \
echo "cd $$TARGET" >> $(ALL_SCRIPT); \
echo -n "run_many" >> $(ALL_SCRIPT); \
+ printf "Emit Tests for $$TARGET\n"; \
$(MAKE) -s --no-print-directory OUTPUT=$$BUILD_TARGET -C $$TARGET emit_tests >> $(ALL_SCRIPT); \
echo "" >> $(ALL_SCRIPT); \
echo "cd \$$ROOT" >> $(ALL_SCRIPT); \
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
index 6cbeea7b4bf1..8547ecbdc61f 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
@@ -195,7 +195,7 @@ static void run_test(int cgroup_fd)
if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread,
(void *)&server_fd)))
- goto close_bpf_object;
+ goto close_server_fd;
pthread_mutex_lock(&server_started_mtx);
pthread_cond_wait(&server_started, &server_started_mtx);
diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
index a82da555b1b0..f4cd60d6fba2 100644
--- a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
+++ b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
@@ -260,13 +260,14 @@ void test_tcp_rtt(void)
if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread,
(void *)&server_fd)))
- goto close_cgroup_fd;
+ goto close_server_fd;
pthread_mutex_lock(&server_started_mtx);
pthread_cond_wait(&server_started, &server_started_mtx);
pthread_mutex_unlock(&server_started_mtx);
CHECK_FAIL(run_test(cgroup_fd, server_fd));
+close_server_fd:
close(server_fd);
close_cgroup_fd:
close(cgroup_fd);
diff --git a/tools/testing/selftests/bpf/test_flow_dissector.sh b/tools/testing/selftests/bpf/test_flow_dissector.sh
index d23d4da66b83..e2d06191bd35 100755
--- a/tools/testing/selftests/bpf/test_flow_dissector.sh
+++ b/tools/testing/selftests/bpf/test_flow_dissector.sh
@@ -63,6 +63,9 @@ fi
# Setup
tc qdisc add dev lo ingress
+echo 0 > /proc/sys/net/ipv4/conf/default/rp_filter
+echo 0 > /proc/sys/net/ipv4/conf/all/rp_filter
+echo 0 > /proc/sys/net/ipv4/conf/lo/rp_filter
echo "Testing IPv4..."
# Drops all IP/UDP packets coming from port 9
diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
index acf7a74f97cd..59ea56945e6c 100755
--- a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
+++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
@@ -314,15 +314,15 @@ test_gso()
command -v nc >/dev/null 2>&1 || \
{ echo >&2 "nc is not available: skipping TSO tests"; return; }
- # listen on IPv*_DST, capture TCP into $TMPFILE
+ # listen on port 9000, capture TCP into $TMPFILE
if [ "${PROTO}" == "IPv4" ] ; then
IP_DST=${IPv4_DST}
ip netns exec ${NS3} bash -c \
- "nc -4 -l -s ${IPv4_DST} -p 9000 > ${TMPFILE} &"
+ "nc -4 -l -p 9000 > ${TMPFILE} &"
elif [ "${PROTO}" == "IPv6" ] ; then
IP_DST=${IPv6_DST}
ip netns exec ${NS3} bash -c \
- "nc -6 -l -s ${IPv6_DST} -p 9000 > ${TMPFILE} &"
+ "nc -6 -l -p 9000 > ${TMPFILE} &"
RET=$?
else
echo " test_gso: unknown PROTO: ${PROTO}"
diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh
index 00c9020bdda8..84de7bc74f2c 100644
--- a/tools/testing/selftests/kselftest/runner.sh
+++ b/tools/testing/selftests/kselftest/runner.sh
@@ -3,9 +3,14 @@
#
# Runs a set of tests in a given subdirectory.
export skip_rc=4
+export timeout_rc=124
export logfile=/dev/stdout
export per_test_logging=
+# Defaults for "settings" file fields:
+# "timeout": how many seconds to let each test run before failing.
+export kselftest_default_timeout=45
+
# There isn't a shell-agnostic way to find the path of a sourced file,
# so we must rely on BASE_DIR being set to find other tools.
if [ -z "$BASE_DIR" ]; then
@@ -24,6 +29,16 @@ tap_prefix()
fi
}
+tap_timeout()
+{
+ # Make sure tests will time out if utility is available.
+ if [ -x /usr/bin/timeout ] ; then
+ /usr/bin/timeout "$kselftest_timeout" "$1"
+ else
+ "$1"
+ fi
+}
+
run_one()
{
DIR="$1"
@@ -32,6 +47,18 @@ run_one()
BASENAME_TEST=$(basename $TEST)
+ # Reset any "settings"-file variables.
+ export kselftest_timeout="$kselftest_default_timeout"
+ # Load per-test-directory kselftest "settings" file.
+ settings="$BASE_DIR/$DIR/settings"
+ if [ -r "$settings" ] ; then
+ while read line ; do
+ field=$(echo "$line" | cut -d= -f1)
+ value=$(echo "$line" | cut -d= -f2-)
+ eval "kselftest_$field"="$value"
+ done < "$settings"
+ fi
+
TEST_HDR_MSG="selftests: $DIR: $BASENAME_TEST"
echo "# $TEST_HDR_MSG"
if [ ! -x "$TEST" ]; then
@@ -44,14 +71,17 @@ run_one()
echo "not ok $test_num $TEST_HDR_MSG"
else
cd `dirname $TEST` > /dev/null
- (((((./$BASENAME_TEST 2>&1; echo $? >&3) |
+ ((((( tap_timeout ./$BASENAME_TEST 2>&1; echo $? >&3) |
tap_prefix >&4) 3>&1) |
(read xs; exit $xs)) 4>>"$logfile" &&
echo "ok $test_num $TEST_HDR_MSG") ||
- (if [ $? -eq $skip_rc ]; then \
+ (rc=$?; \
+ if [ $rc -eq $skip_rc ]; then \
echo "not ok $test_num $TEST_HDR_MSG # SKIP"
+ elif [ $rc -eq $timeout_rc ]; then \
+ echo "not ok $test_num $TEST_HDR_MSG # TIMEOUT"
else
- echo "not ok $test_num $TEST_HDR_MSG"
+ echo "not ok $test_num $TEST_HDR_MSG # exit=$rc"
fi)
cd - >/dev/null
fi
diff --git a/tools/testing/selftests/kselftest_install.sh b/tools/testing/selftests/kselftest_install.sh
index ec304463883c..e2e1911d62d5 100755
--- a/tools/testing/selftests/kselftest_install.sh
+++ b/tools/testing/selftests/kselftest_install.sh
@@ -24,12 +24,12 @@ main()
echo "$0: Installing in specified location - $install_loc ..."
fi
- install_dir=$install_loc/kselftest
+ install_dir=$install_loc/kselftest_install
# Create install directory
mkdir -p $install_dir
# Build tests
- INSTALL_PATH=$install_dir make install
+ KSFT_INSTALL_PATH=$install_dir make install
}
main "$@"
diff --git a/tools/testing/selftests/powerpc/mm/tlbie_test.c b/tools/testing/selftests/powerpc/mm/tlbie_test.c
index 9868a5ddd847..f85a0938ab25 100644
--- a/tools/testing/selftests/powerpc/mm/tlbie_test.c
+++ b/tools/testing/selftests/powerpc/mm/tlbie_test.c
@@ -636,7 +636,7 @@ int main(int argc, char *argv[])
nrthreads = strtoul(optarg, NULL, 10);
break;
case 'l':
- strncpy(logdir, optarg, LOGDIR_NAME_SIZE);
+ strncpy(logdir, optarg, LOGDIR_NAME_SIZE - 1);
break;
case 't':
run_time = strtoul(optarg, NULL, 10);
diff --git a/tools/testing/selftests/rtc/settings b/tools/testing/selftests/rtc/settings
new file mode 100644
index 000000000000..ba4d85f74cd6
--- /dev/null
+++ b/tools/testing/selftests/rtc/settings
@@ -0,0 +1 @@
+timeout=90
diff --git a/tools/testing/selftests/vm/gup_benchmark.c b/tools/testing/selftests/vm/gup_benchmark.c
index c0534e298b51..cb3fc09645c4 100644
--- a/tools/testing/selftests/vm/gup_benchmark.c
+++ b/tools/testing/selftests/vm/gup_benchmark.c
@@ -37,7 +37,7 @@ int main(int argc, char **argv)
char *file = "/dev/zero";
char *p;
- while ((opt = getopt(argc, argv, "m:r:n:f:tTLUSH")) != -1) {
+ while ((opt = getopt(argc, argv, "m:r:n:f:tTLUwSH")) != -1) {
switch (opt) {
case 'm':
size = atoi(optarg) * MB;
diff --git a/tools/testing/selftests/watchdog/watchdog-test.c b/tools/testing/selftests/watchdog/watchdog-test.c
index afff120c7be6..f45e510500c0 100644
--- a/tools/testing/selftests/watchdog/watchdog-test.c
+++ b/tools/testing/selftests/watchdog/watchdog-test.c
@@ -19,7 +19,7 @@
int fd;
const char v = 'V';
-static const char sopts[] = "bdehp:t:Tn:NLf:";
+static const char sopts[] = "bdehp:t:Tn:NLf:i";
static const struct option lopts[] = {
{"bootstatus", no_argument, NULL, 'b'},
{"disable", no_argument, NULL, 'd'},
@@ -32,6 +32,7 @@ static const struct option lopts[] = {
{"getpretimeout", no_argument, NULL, 'N'},
{"gettimeleft", no_argument, NULL, 'L'},
{"file", required_argument, NULL, 'f'},
+ {"info", no_argument, NULL, 'i'},
{NULL, no_argument, NULL, 0x0}
};
@@ -72,6 +73,7 @@ static void usage(char *progname)
printf("Usage: %s [options]\n", progname);
printf(" -f, --file\t\tOpen watchdog device file\n");
printf("\t\t\tDefault is /dev/watchdog\n");
+ printf(" -i, --info\t\tShow watchdog_info\n");
printf(" -b, --bootstatus\tGet last boot status (Watchdog/POR)\n");
printf(" -d, --disable\t\tTurn off the watchdog timer\n");
printf(" -e, --enable\t\tTurn on the watchdog timer\n");
@@ -97,6 +99,7 @@ int main(int argc, char *argv[])
int c;
int oneshot = 0;
char *file = "/dev/watchdog";
+ struct watchdog_info info;
setbuf(stdout, NULL);
@@ -118,6 +121,16 @@ int main(int argc, char *argv[])
exit(-1);
}
+ /*
+ * Validate that `file` is a watchdog device
+ */
+ ret = ioctl(fd, WDIOC_GETSUPPORT, &info);
+ if (ret) {
+ printf("WDIOC_GETSUPPORT error '%s'\n", strerror(errno));
+ close(fd);
+ exit(ret);
+ }
+
optind = 0;
while ((c = getopt_long(argc, argv, sopts, lopts, NULL)) != -1) {
@@ -205,6 +218,18 @@ int main(int argc, char *argv[])
case 'f':
/* Handled above */
break;
+ case 'i':
+ /*
+ * watchdog_info was obtained as part of file open
+ * validation. So we just show it here.
+ */
+ oneshot = 1;
+ printf("watchdog_info:\n");
+ printf(" identity:\t\t%s\n", info.identity);
+ printf(" firmware_version:\t%u\n",
+ info.firmware_version);
+ printf(" options:\t\t%08x\n", info.options);
+ break;
default:
usage(argv[0]);
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/tools/virtio/crypto/hash.h
index e69de29bb2d1..e69de29bb2d1 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/tools/virtio/crypto/hash.h
diff --git a/tools/virtio/linux/dma-mapping.h b/tools/virtio/linux/dma-mapping.h
index f91aeb5fe571..8f41cd6bd5c0 100644
--- a/tools/virtio/linux/dma-mapping.h
+++ b/tools/virtio/linux/dma-mapping.h
@@ -29,4 +29,6 @@ enum dma_data_direction {
#define dma_unmap_single(...) do { } while (0)
#define dma_unmap_page(...) do { } while (0)
+#define dma_max_mapping_size(...) SIZE_MAX
+
#endif
diff --git a/tools/virtio/xen/xen.h b/tools/virtio/xen/xen.h
new file mode 100644
index 000000000000..f569387d1403
--- /dev/null
+++ b/tools/virtio/xen/xen.h
@@ -0,0 +1,6 @@
+#ifndef XEN_XEN_STUB_H
+#define XEN_XEN_STUB_H
+
+#define xen_domain() 0
+
+#endif